blob: b9e5b856501024458c6a85dd5bc96d4671a2bca2 [file] [log] [blame]
Alex Elder30c6d9d2015-05-22 13:02:08 -05001/*
2 * SVC Greybus driver.
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +000010#include <linux/input.h>
Viresh Kumar067906f2015-08-06 12:44:55 +053011#include <linux/workqueue.h>
Alex Elder30c6d9d2015-05-22 13:02:08 -050012
Viresh Kumarf66427a2015-09-02 21:27:13 +053013#include "greybus.h"
14
Johan Hovoldf6c6c132015-11-11 10:07:08 +010015#define CPORT_FLAGS_E2EFC BIT(0)
16#define CPORT_FLAGS_CSD_N BIT(1)
17#define CPORT_FLAGS_CSV_N BIT(2)
Perry Hung0b226492015-07-24 19:02:34 -040018
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +000019#define SVC_KEY_ARA_BUTTON KEY_A
Viresh Kumarb45864d2015-07-24 15:32:21 +053020
/*
 * A hotplug/hot-unplug request deferred to the svc workqueue, pairing the
 * work item with the operation it must process.  The operation reference
 * is taken when the request is queued and dropped after processing.
 */
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};
25
Viresh Kumaread35462015-07-21 17:44:19 +053026
Johan Hovold66069fb2015-11-25 15:59:09 +010027static ssize_t endo_id_show(struct device *dev,
28 struct device_attribute *attr, char *buf)
29{
30 struct gb_svc *svc = to_gb_svc(dev);
31
32 return sprintf(buf, "0x%04x\n", svc->endo_id);
33}
34static DEVICE_ATTR_RO(endo_id);
35
36static ssize_t ap_intf_id_show(struct device *dev,
37 struct device_attribute *attr, char *buf)
38{
39 struct gb_svc *svc = to_gb_svc(dev);
40
41 return sprintf(buf, "%u\n", svc->ap_intf_id);
42}
43static DEVICE_ATTR_RO(ap_intf_id);
44
Rui Miguel Silva2c92bd52016-01-11 13:46:33 +000045
46// FIXME
47// This is a hack, we need to do this "right" and clean the interface up
48// properly, not just forcibly yank the thing out of the system and hope for the
49// best. But for now, people want their modules to come out without having to
50// throw the thing to the ground or get out a screwdriver.
51static ssize_t intf_eject_store(struct device *dev,
52 struct device_attribute *attr, const char *buf,
53 size_t len)
54{
55 struct gb_svc *svc = to_gb_svc(dev);
56 unsigned short intf_id;
57 int ret;
58
59 ret = kstrtou16(buf, 10, &intf_id);
60 if (ret < 0)
61 return ret;
62
63 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
64
65 ret = gb_svc_intf_eject(svc, intf_id);
66 if (ret < 0)
67 return ret;
68
69 return len;
70}
71static DEVICE_ATTR_WO(intf_eject);
72
/* sysfs attributes exposed on the svc device. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
80
Viresh Kumar505f16c2015-08-31 17:21:07 +053081static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -050082{
83 struct gb_svc_intf_device_id_request request;
84
85 request.intf_id = intf_id;
86 request.device_id = device_id;
87
88 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
89 &request, sizeof(request), NULL, 0);
90}
91
Viresh Kumar3f0e9182015-08-31 17:21:06 +053092int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -050093{
94 struct gb_svc_intf_reset_request request;
95
96 request.intf_id = intf_id;
97
98 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
99 &request, sizeof(request), NULL, 0);
100}
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530101EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500102
/*
 * Ask the SVC to physically eject the module hosting the given interface.
 * Returns 0 on success or a negative errno.
 */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	return gb_operation_sync_timeout(svc->connection,
					 GB_SVC_TYPE_INTF_EJECT, &request,
					 sizeof(request), NULL, 0,
					 GB_SVC_EJECT_TIME);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_eject);
119
/*
 * Read a DME attribute from the peer at @intf_id via the SVC.
 *
 * @attr/@selector identify the attribute; on success the attribute value is
 * stored in *@value (if @value is non-NULL).  Returns 0 on success, a
 * negative errno if the operation itself failed, or -EIO if the SVC reported
 * a non-zero UniPro result code.
 */
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	/* A non-zero result code is a UniPro-level failure. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
154
/*
 * Write @value to a DME attribute of the peer at @intf_id via the SVC.
 *
 * Returns 0 on success, a negative errno if the operation failed, or -EIO
 * if the SVC reported a non-zero UniPro result code.
 */
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	/* A non-zero result code is a UniPro-level failure. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
187
/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for boot
 * status attribute ES3_INIT_STATUS. AP needs to read and clear it, after
 * reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * Check if the module is ES2 or ES3, and choose attr number
	 * appropriately.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		attr = DME_ATTR_T_TST_SRC_INCREMENT;
	else
		attr = DME_ATTR_ES3_INIT_STATUS;

	/* Read and clear boot status in ES3_INIT_STATUS */
	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_ATTR_SELECTOR_INDEX, &value);

	if (ret)
		return ret;

	/*
	 * A nonzero boot status indicates the module has finished
	 * booting. Clear it.
	 */
	if (!value) {
		dev_err(&intf->dev, "Module not ready yet\n");
		return -ENODEV;
	}

	/*
	 * Check if the module needs to boot from UniPro.
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		init_status = value;
	else
		init_status = value >> 24;

	/* Flag interfaces that must download their firmware over UniPro. */
	if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
	    init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
		intf->boot_over_unipro = true;

	/* Write zero back to clear the boot-status attribute. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_ATTR_SELECTOR_INDEX, 0);
}
250
/*
 * Ask the SVC to create a connection between two cports, identified by
 * (interface id, cport id) pairs.  Returns 0 on success or a negative errno.
 */
int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     bool boot_over_unipro)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	/*
	 * XXX: fix connection parameters to TC0 and all CPort flags
	 * for now.
	 */
	request.tc = 0;

	/*
	 * We need to skip setting E2EFC and other flags to the connection
	 * create request, for all cports, on an interface that needs to boot
	 * over unipro, i.e. interfaces required to download firmware.
	 */
	if (boot_over_unipro)
		request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_CSD_N;
	else
		request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500282
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530283void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
284 u8 intf2_id, u16 cport2_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500285{
286 struct gb_svc_conn_destroy_request request;
Viresh Kumard9fcfff2015-08-31 17:21:05 +0530287 struct gb_connection *connection = svc->connection;
288 int ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500289
290 request.intf1_id = intf1_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100291 request.cport1_id = cpu_to_le16(cport1_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500292 request.intf2_id = intf2_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100293 request.cport2_id = cpu_to_le16(cport2_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500294
Viresh Kumard9fcfff2015-08-31 17:21:05 +0530295 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
296 &request, sizeof(request), NULL, 0);
Johan Hovold684156a2015-11-25 15:59:19 +0100297 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530298 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100299 intf1_id, cport1_id, intf2_id, cport2_id, ret);
300 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500301}
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530302EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500303
Viresh Kumarbb106852015-09-07 16:01:25 +0530304/* Creates bi-directional routes between the devices */
Viresh Kumar505f16c2015-08-31 17:21:07 +0530305static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
306 u8 intf2_id, u8 dev2_id)
Perry Hunge08aaa42015-07-24 19:02:31 -0400307{
308 struct gb_svc_route_create_request request;
309
310 request.intf1_id = intf1_id;
311 request.dev1_id = dev1_id;
312 request.intf2_id = intf2_id;
313 request.dev2_id = dev2_id;
314
315 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
316 &request, sizeof(request), NULL, 0);
317}
Perry Hunge08aaa42015-07-24 19:02:31 -0400318
Viresh Kumar0a020572015-09-07 18:05:26 +0530319/* Destroys bi-directional routes between the devices */
320static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
321{
322 struct gb_svc_route_destroy_request request;
323 int ret;
324
325 request.intf1_id = intf1_id;
326 request.intf2_id = intf2_id;
327
328 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
329 &request, sizeof(request), NULL, 0);
Johan Hovold684156a2015-11-25 15:59:19 +0100330 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530331 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100332 intf1_id, intf2_id, ret);
333 }
Viresh Kumar0a020572015-09-07 18:05:26 +0530334}
335
/*
 * Ask the SVC to reconfigure the UniPro link power mode of an interface.
 *
 * Returns a negative errno if the operation failed, otherwise the SVC's
 * result code (0 on success, positive on a link-level failure).  Note that
 * callers must therefore check for any non-zero return, not just negative
 * values.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
Laurent Pinchart784f8762015-12-18 21:23:22 +0200365
Greg Kroah-Hartman55ec09e2016-01-19 23:30:42 -0800366int gb_svc_ping(struct gb_svc *svc)
367{
Greg Kroah-Hartman839ac5b2016-01-26 08:57:50 -0800368 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
369 NULL, 0, NULL, 0,
370 GB_OPERATION_TIMEOUT_DEFAULT * 2);
Greg Kroah-Hartman55ec09e2016-01-19 23:30:42 -0800371}
372EXPORT_SYMBOL_GPL(gb_svc_ping);
373
/*
 * Handle the SVC protocol version request: negotiate the protocol version,
 * record it on the svc, and echo the agreed version in the response.
 * Returns 0 on success or a negative errno.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_protocol_version_request *request;
	struct gb_protocol_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Reject SVCs speaking a newer major version than we support. */
	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	/* Confirm the negotiated version back to the SVC. */
	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
408
/*
 * Handle the SVC hello request: record the endo and AP interface ids, then
 * register the svc device, its input device and its watchdog.  Each failure
 * path unwinds the registrations made before it.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = input_register_device(svc->input);
	if (ret) {
		dev_err(&svc->dev, "failed to register input: %d\n", ret);
		device_del(&svc->dev);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
		return ret;
	}

	return 0;
}
450
/*
 * Remove an interface: mark it disconnected, unregister it, tear down its
 * route to the AP and release its device id.
 */
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
	u8 intf_id = intf->interface_id;
	u8 device_id = intf->device_id;

	/* Flag the interface as gone before tearing it down. */
	intf->disconnected = true;

	gb_interface_remove(intf);

	/*
	 * Destroy the two-way route between the AP and the interface.
	 */
	gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

	ida_simple_remove(&svc->device_id_map, device_id);
}
467
Johan Hovold9ae41092015-12-02 18:23:29 +0100468static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500469{
Johan Hovold24456a092015-12-02 18:23:27 +0100470 struct gb_svc_intf_hotplug_request *request;
Johan Hovold9ae41092015-12-02 18:23:29 +0100471 struct gb_connection *connection = operation->connection;
Viresh Kumar067906f2015-08-06 12:44:55 +0530472 struct gb_svc *svc = connection->private;
Johan Hovold25376362015-11-03 18:03:23 +0100473 struct gb_host_device *hd = connection->hd;
Viresh Kumaread35462015-07-21 17:44:19 +0530474 struct gb_interface *intf;
475 u8 intf_id, device_id;
Viresh Kumarf3e6c092016-01-22 16:16:08 +0530476 u32 vendor_id = 0;
477 u32 product_id = 0;
Viresh Kumaread35462015-07-21 17:44:19 +0530478 int ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500479
Johan Hovold24456a092015-12-02 18:23:27 +0100480 /* The request message size has already been verified. */
Johan Hovold9ae41092015-12-02 18:23:29 +0100481 request = operation->request->payload;
Johan Hovold24456a092015-12-02 18:23:27 +0100482 intf_id = request->intf_id;
483
484 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500485
Viresh Kumarbbaca712015-09-23 16:48:08 -0700486 intf = gb_interface_find(hd, intf_id);
487 if (intf) {
488 /*
Viresh Kumarf3e6c092016-01-22 16:16:08 +0530489 * For ES2, we need to maintain the same vendor/product ids we
490 * got from bootrom, otherwise userspace can't distinguish
491 * between modules.
492 */
493 vendor_id = intf->vendor_id;
494 product_id = intf->product_id;
495
496 /*
Viresh Kumarbbaca712015-09-23 16:48:08 -0700497 * We have received a hotplug request for an interface that
498 * already exists.
499 *
500 * This can happen in cases like:
501 * - bootrom loading the firmware image and booting into that,
502 * which only generates a hotplug event. i.e. no hot-unplug
503 * event.
504 * - Or the firmware on the module crashed and sent hotplug
505 * request again to the SVC, which got propagated to AP.
506 *
507 * Remove the interface and add it again, and let user know
508 * about this with a print message.
509 */
Viresh Kumar2f3db922015-12-04 21:30:09 +0530510 dev_info(&svc->dev, "removing interface %u to add it again\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100511 intf_id);
Johan Hovoldb4ee82e2015-12-02 18:23:28 +0100512 gb_svc_intf_remove(svc, intf);
Viresh Kumarbbaca712015-09-23 16:48:08 -0700513 }
514
Viresh Kumaread35462015-07-21 17:44:19 +0530515 intf = gb_interface_create(hd, intf_id);
516 if (!intf) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530517 dev_err(&svc->dev, "failed to create interface %u\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100518 intf_id);
Johan Hovold9ae41092015-12-02 18:23:29 +0100519 return;
Viresh Kumaread35462015-07-21 17:44:19 +0530520 }
521
Viresh Kumar63d742b2015-12-23 09:07:42 +0530522 intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id);
523 intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id);
524 intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
525 intf->product_id = le32_to_cpu(request->data.ara_prod_id);
Viresh Kumar57c6bcc2015-12-28 11:59:00 +0530526 intf->serial_number = le64_to_cpu(request->data.serial_number);
Viresh Kumar63d742b2015-12-23 09:07:42 +0530527
Viresh Kumarf3e6c092016-01-22 16:16:08 +0530528 /*
529 * Use VID/PID specified at hotplug if:
530 * - Bridge ASIC chip isn't ES2
531 * - Received non-zero Vendor/Product ids
532 *
533 * Otherwise, use the ids we received from bootrom.
534 */
535 if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
536 intf->ddbl1_product_id == ES2_DDBL1_PROD_ID &&
537 intf->vendor_id == 0 && intf->product_id == 0) {
538 intf->vendor_id = vendor_id;
539 intf->product_id = product_id;
540 }
541
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700542 ret = gb_svc_read_and_clear_module_boot_status(intf);
Johan Hovoldb395754a2015-12-07 15:05:28 +0100543 if (ret) {
544 dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n",
545 intf_id, ret);
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700546 goto destroy_interface;
Johan Hovoldb395754a2015-12-07 15:05:28 +0100547 }
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700548
Viresh Kumaread35462015-07-21 17:44:19 +0530549 /*
550 * Create a device id for the interface:
551 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
552 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
553 *
554 * XXX Do we need to allocate device ID for SVC or the AP here? And what
555 * XXX about an AP with multiple interface blocks?
556 */
Johan Hovoldc09db182015-09-15 09:18:08 +0200557 device_id = ida_simple_get(&svc->device_id_map,
Johan Hovold89f637f2015-09-01 12:25:25 +0200558 GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
Viresh Kumaread35462015-07-21 17:44:19 +0530559 if (device_id < 0) {
560 ret = device_id;
Viresh Kumar2f3db922015-12-04 21:30:09 +0530561 dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100562 intf_id, ret);
Viresh Kumaread35462015-07-21 17:44:19 +0530563 goto destroy_interface;
564 }
565
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530566 ret = gb_svc_intf_device_id(svc, intf_id, device_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530567 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530568 dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100569 device_id, intf_id, ret);
Viresh Kumaread35462015-07-21 17:44:19 +0530570 goto ida_put;
571 }
572
Perry Hung7e275462015-07-24 19:02:32 -0400573 /*
574 * Create a two-way route between the AP and the new interface
575 */
Johan Hovold66069fb2015-11-25 15:59:09 +0100576 ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530577 intf_id, device_id);
Perry Hung7e275462015-07-24 19:02:32 -0400578 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530579 dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100580 intf_id, device_id, ret);
Viresh Kumar0a020572015-09-07 18:05:26 +0530581 goto svc_id_free;
Perry Hung7e275462015-07-24 19:02:32 -0400582 }
583
Viresh Kumaread35462015-07-21 17:44:19 +0530584 ret = gb_interface_init(intf, device_id);
585 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530586 dev_err(&svc->dev, "failed to initialize interface %u (device id %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100587 intf_id, device_id, ret);
Viresh Kumar0a020572015-09-07 18:05:26 +0530588 goto destroy_route;
Viresh Kumaread35462015-07-21 17:44:19 +0530589 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500590
Johan Hovold9ae41092015-12-02 18:23:29 +0100591 return;
Viresh Kumaread35462015-07-21 17:44:19 +0530592
Viresh Kumar0a020572015-09-07 18:05:26 +0530593destroy_route:
Johan Hovold66069fb2015-11-25 15:59:09 +0100594 gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530595svc_id_free:
596 /*
597 * XXX Should we tell SVC that this id doesn't belong to interface
598 * XXX anymore.
599 */
600ida_put:
Johan Hovoldc09db182015-09-15 09:18:08 +0200601 ida_simple_remove(&svc->device_id_map, device_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530602destroy_interface:
Viresh Kumar80d1ede2015-09-23 16:48:10 -0700603 gb_interface_remove(intf);
Johan Hovold9ae41092015-12-02 18:23:29 +0100604}
605
/*
 * Handle an SVC interface hot-unplug event by removing the matching
 * interface, if it exists.  Runs from the deferred-request workqueue.
 */
static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
{
	struct gb_svc *svc = operation->connection->private;
	struct gb_svc_intf_hot_unplug_request *request;
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_interface *intf;
	u8 intf_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

	intf = gb_interface_find(hd, intf_id);
	if (!intf) {
		/* Unplug for an unknown interface; nothing to tear down. */
		dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
			 intf_id);
		return;
	}

	gb_svc_intf_remove(svc, intf);
}
629
/*
 * Workqueue handler: dispatch a deferred hotplug/hot-unplug request to its
 * processing function, then drop the operation reference taken when the
 * request was queued and free the deferred-request wrapper.
 */
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = operation->connection->private;
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_INTF_HOTPLUG:
		gb_svc_process_intf_hotplug(operation);
		break;
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		gb_svc_process_intf_hot_unplug(operation);
		break;
	default:
		/* Only hotplug/hot-unplug requests are ever deferred. */
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}
656
657static int gb_svc_queue_deferred_request(struct gb_operation *operation)
658{
Johan Hovold3e48aca2015-12-02 18:23:31 +0100659 struct gb_svc *svc = operation->connection->private;
Johan Hovold9ae41092015-12-02 18:23:29 +0100660 struct gb_svc_deferred_request *dr;
661
662 dr = kmalloc(sizeof(*dr), GFP_KERNEL);
663 if (!dr)
664 return -ENOMEM;
665
666 gb_operation_get(operation);
667
668 dr->operation = operation;
669 INIT_WORK(&dr->work, gb_svc_process_deferred_request);
670
Johan Hovold3e48aca2015-12-02 18:23:31 +0100671 queue_work(svc->wq, &dr->work);
Johan Hovold9ae41092015-12-02 18:23:29 +0100672
673 return 0;
Viresh Kumar067906f2015-08-06 12:44:55 +0530674}
Viresh Kumaread35462015-07-21 17:44:19 +0530675
/*
 * Bringing up a module can be time consuming, as that may require lots of
 * initialization on the module side. Over that, we may also need to download
 * the firmware first and flash that on the module.
 *
 * In order not to make other svc events wait for all this to finish,
 * handle most of module hotplug stuff outside of the hotplug callback, with
 * help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
	struct gb_svc *svc = op->connection->private;
	struct gb_svc_intf_hotplug_request *request;

	/* Validate the size here; the worker relies on it being checked. */
	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}
702
703static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
704{
Johan Hovold684156a2015-11-25 15:59:19 +0100705 struct gb_svc *svc = op->connection->private;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100706 struct gb_svc_intf_hot_unplug_request *request;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500707
Johan Hovoldd34a3642015-12-02 18:23:26 +0100708 if (op->request->payload_size < sizeof(*request)) {
Johan Hovold684156a2015-11-25 15:59:19 +0100709 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
Johan Hovoldd34a3642015-12-02 18:23:26 +0100710 op->request->payload_size, sizeof(*request));
Alex Elder30c6d9d2015-05-22 13:02:08 -0500711 return -EINVAL;
712 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500713
Johan Hovoldd34a3642015-12-02 18:23:26 +0100714 request = op->request->payload;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100715
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100716 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500717
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100718 return gb_svc_queue_deferred_request(op);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500719}
720
721static int gb_svc_intf_reset_recv(struct gb_operation *op)
722{
Johan Hovold684156a2015-11-25 15:59:19 +0100723 struct gb_svc *svc = op->connection->private;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500724 struct gb_message *request = op->request;
725 struct gb_svc_intf_reset_request *reset;
726 u8 intf_id;
727
728 if (request->payload_size < sizeof(*reset)) {
Johan Hovold684156a2015-11-25 15:59:19 +0100729 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
730 request->payload_size, sizeof(*reset));
Alex Elder30c6d9d2015-05-22 13:02:08 -0500731 return -EINVAL;
732 }
733 reset = request->payload;
734
735 intf_id = reset->intf_id;
736
737 /* FIXME Reset the interface here */
738
739 return 0;
740}
741
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000742static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
743{
744 switch (key_code) {
745 case GB_KEYCODE_ARA:
746 *code = SVC_KEY_ARA_BUTTON;
747 break;
748 default:
749 dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
750 return -EINVAL;
751 }
752
753 return 0;
754}
755
756static int gb_svc_key_event_recv(struct gb_operation *op)
757{
758 struct gb_svc *svc = op->connection->private;
759 struct gb_message *request = op->request;
760 struct gb_svc_key_event_request *key;
761 u16 code;
762 u8 event;
763 int ret;
764
765 if (request->payload_size < sizeof(*key)) {
766 dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
767 request->payload_size, sizeof(*key));
768 return -EINVAL;
769 }
770
771 key = request->payload;
772
773 ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
774 if (ret < 0)
775 return ret;
776
777 event = key->key_event;
778 if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
779 dev_warn(&svc->dev, "unknown key event received: %u\n", event);
780 return -EINVAL;
781 }
782
783 input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
784 input_sync(svc->input);
785
786 return 0;
787}
788
/*
 * Central dispatcher for all incoming SVC requests.  First enforces the
 * required initial ordering of requests, then dispatches to the
 * per-request handlers.  Returns 0 on success or a negative errno.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		/* Only valid as the very first request after reset. */
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		/* Must immediately follow the version handshake. */
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		/* Everything else requires the handshake to be complete. */
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		/* Advance the state machine only on success. */
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
851
Rui Miguel Silvaebe99d62016-01-21 01:42:17 +0000852static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
853{
854 struct input_dev *input_dev;
855
856 input_dev = input_allocate_device();
857 if (!input_dev)
858 return ERR_PTR(-ENOMEM);
859
860 input_dev->name = dev_name(&svc->dev);
861 svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
862 input_dev->name);
863 if (!svc->input_phys)
864 goto err_free_input;
865
866 input_dev->phys = svc->input_phys;
867 input_dev->dev.parent = &svc->dev;
868
869 input_set_drvdata(input_dev, svc);
870
871 input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);
872
873 return input_dev;
874
875err_free_input:
876 input_free_device(svc->input);
877 return ERR_PTR(-ENOMEM);
878}
879
/*
 * Device-model release callback: final teardown once the last reference
 * to the svc device is dropped.  Frees everything gb_svc_create() set up.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* The connection may never have been created on early failure. */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	/* Work items hold a reference on svc, so the queue is idle here. */
	destroy_workqueue(svc->wq);
	kfree(svc->input_phys);
	kfree(svc);
}
891
/* Device type for the svc device; ties in the release callback above. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
896
/*
 * Allocate and initialize an svc structure for the given host device,
 * including its workqueue, input device and static SVC connection.
 * The device is initialized but not registered; registration happens
 * later from the request handler.  Returns NULL on failure.
 */
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	/* Ordered, single-threaded queue for deferred (hot)plug work. */
	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->input = gb_svc_input_create(svc);
	if (IS_ERR(svc->input)) {
		dev_err(&svc->dev, "failed to create input device: %ld\n",
			PTR_ERR(svc->input));
		goto err_put_device;
	}

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_free_input;
	}

	svc->connection->private = svc;

	return svc;

err_free_input:
	input_free_device(svc->input);
err_put_device:
	/* Drops the sole reference; gb_svc_release() frees the rest. */
	put_device(&svc->dev);
	return NULL;
}
949
950int gb_svc_add(struct gb_svc *svc)
951{
952 int ret;
953
954 /*
955 * The SVC protocol is currently driven by the SVC, so the SVC device
956 * is added from the connection request handler when enough
957 * information has been received.
958 */
Johan Hovoldf7ee0812016-01-21 17:34:21 +0100959 ret = gb_connection_enable(svc->connection);
Johan Hovold7adeaae72015-12-07 15:05:37 +0100960 if (ret)
961 return ret;
962
963 return 0;
964}
965
/*
 * Tear down the svc: stop incoming requests, unregister what the
 * request handler may have registered, and drain deferred work.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop new requests before unregistering anything. */
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}

	/* Wait for any queued deferred requests to finish. */
	flush_workqueue(svc->wq);
}
982
/* Drop a reference on the svc device; may trigger gb_svc_release(). */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}