blob: be62d308d1c991da03483cc52702700ec3089e66 [file] [log] [blame]
Alex Elder30c6d9d2015-05-22 13:02:08 -05001/*
2 * SVC Greybus driver.
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
Viresh Kumar067906f2015-08-06 12:44:55 +053010#include <linux/workqueue.h>
Alex Elder30c6d9d2015-05-22 13:02:08 -050011
Viresh Kumarf66427a2015-09-02 21:27:13 +053012#include "greybus.h"
13
Johan Hovoldf6c6c132015-11-11 10:07:08 +010014#define CPORT_FLAGS_E2EFC BIT(0)
15#define CPORT_FLAGS_CSD_N BIT(1)
16#define CPORT_FLAGS_CSV_N BIT(2)
Perry Hung0b226492015-07-24 19:02:34 -040017
Viresh Kumarb45864d2015-07-24 15:32:21 +053018
/*
 * A deferred SVC request: holds a reference to an incoming operation so
 * it can be processed later on the svc workqueue (see
 * gb_svc_queue_deferred_request()), keeping slow work out of the
 * request-receive path.
 */
struct gb_svc_deferred_request {
	struct work_struct work;	/* queued on svc->wq */
	struct gb_operation *operation;	/* ref held until work completes */
};
23
Viresh Kumaread35462015-07-21 17:44:19 +053024
Johan Hovold66069fb2015-11-25 15:59:09 +010025static ssize_t endo_id_show(struct device *dev,
26 struct device_attribute *attr, char *buf)
27{
28 struct gb_svc *svc = to_gb_svc(dev);
29
30 return sprintf(buf, "0x%04x\n", svc->endo_id);
31}
32static DEVICE_ATTR_RO(endo_id);
33
34static ssize_t ap_intf_id_show(struct device *dev,
35 struct device_attribute *attr, char *buf)
36{
37 struct gb_svc *svc = to_gb_svc(dev);
38
39 return sprintf(buf, "%u\n", svc->ap_intf_id);
40}
41static DEVICE_ATTR_RO(ap_intf_id);
42
Rui Miguel Silva2c92bd52016-01-11 13:46:33 +000043
44// FIXME
45// This is a hack, we need to do this "right" and clean the interface up
46// properly, not just forcibly yank the thing out of the system and hope for the
47// best. But for now, people want their modules to come out without having to
48// throw the thing to the ground or get out a screwdriver.
49static ssize_t intf_eject_store(struct device *dev,
50 struct device_attribute *attr, const char *buf,
51 size_t len)
52{
53 struct gb_svc *svc = to_gb_svc(dev);
54 unsigned short intf_id;
55 int ret;
56
57 ret = kstrtou16(buf, 10, &intf_id);
58 if (ret < 0)
59 return ret;
60
61 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
62
63 ret = gb_svc_intf_eject(svc, intf_id);
64 if (ret < 0)
65 return ret;
66
67 return len;
68}
69static DEVICE_ATTR_WO(intf_eject);
70
/* sysfs attributes exposed on the svc device (registered via dev groups). */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
78
Viresh Kumar505f16c2015-08-31 17:21:07 +053079static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -050080{
81 struct gb_svc_intf_device_id_request request;
82
83 request.intf_id = intf_id;
84 request.device_id = device_id;
85
86 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
87 &request, sizeof(request), NULL, 0);
88}
89
Viresh Kumar3f0e9182015-08-31 17:21:06 +053090int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -050091{
92 struct gb_svc_intf_reset_request request;
93
94 request.intf_id = intf_id;
95
96 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
97 &request, sizeof(request), NULL, 0);
98}
Viresh Kumar3f0e9182015-08-31 17:21:06 +053099EXPORT_SYMBOL_GPL(gb_svc_intf_reset);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500100
Rui Miguel Silvac5d55fb2016-01-11 13:46:31 +0000101int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
102{
103 struct gb_svc_intf_eject_request request;
104
105 request.intf_id = intf_id;
106
107 /*
108 * The pulse width for module release in svc is long so we need to
109 * increase the timeout so the operation will not return to soon.
110 */
111 return gb_operation_sync_timeout(svc->connection,
112 GB_SVC_TYPE_INTF_EJECT, &request,
113 sizeof(request), NULL, 0,
114 GB_SVC_EJECT_TIME);
115}
116EXPORT_SYMBOL_GPL(gb_svc_intf_eject);
117
Viresh Kumar19151c32015-09-09 21:08:29 +0530118int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
119 u32 *value)
120{
121 struct gb_svc_dme_peer_get_request request;
122 struct gb_svc_dme_peer_get_response response;
123 u16 result;
124 int ret;
125
126 request.intf_id = intf_id;
127 request.attr = cpu_to_le16(attr);
128 request.selector = cpu_to_le16(selector);
129
130 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
131 &request, sizeof(request),
132 &response, sizeof(response));
133 if (ret) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530134 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100135 intf_id, attr, selector, ret);
Viresh Kumar19151c32015-09-09 21:08:29 +0530136 return ret;
137 }
138
139 result = le16_to_cpu(response.result_code);
140 if (result) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530141 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100142 intf_id, attr, selector, result);
Viresh Kumar4aac6c52015-12-04 21:30:08 +0530143 return -EIO;
Viresh Kumar19151c32015-09-09 21:08:29 +0530144 }
145
146 if (value)
147 *value = le32_to_cpu(response.attr_value);
148
149 return 0;
150}
151EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);
152
153int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
154 u32 value)
155{
156 struct gb_svc_dme_peer_set_request request;
157 struct gb_svc_dme_peer_set_response response;
158 u16 result;
159 int ret;
160
161 request.intf_id = intf_id;
162 request.attr = cpu_to_le16(attr);
163 request.selector = cpu_to_le16(selector);
164 request.value = cpu_to_le32(value);
165
166 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
167 &request, sizeof(request),
168 &response, sizeof(response));
169 if (ret) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530170 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100171 intf_id, attr, selector, value, ret);
Viresh Kumar19151c32015-09-09 21:08:29 +0530172 return ret;
173 }
174
175 result = le16_to_cpu(response.result_code);
176 if (result) {
Viresh Kumarb933fa42015-12-04 21:30:10 +0530177 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100178 intf_id, attr, selector, value, result);
Viresh Kumar4aac6c52015-12-04 21:30:08 +0530179 return -EIO;
Viresh Kumar19151c32015-09-09 21:08:29 +0530180 }
181
182 return 0;
183}
184EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
185
/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for boot
 * status attribute ES3_INIT_STATUS. AP needs to read and clear it, after
 * reading a non-zero value from it.
 *
 * Returns 0 when a non-zero boot status was read and cleared, -ENODEV
 * when the module has not finished booting, or a negative errno from
 * the DME peer get/set operations.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	int ret;
	u32 value;		/* raw boot-status attribute value */
	u16 attr;		/* DME attribute id, ES2 or ES3 variant */
	u8 init_status;		/* boot-status byte extracted from 'value' */

	/*
	 * Check if the module is ES2 or ES3, and choose attr number
	 * appropriately.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		attr = DME_ATTR_T_TST_SRC_INCREMENT;
	else
		attr = DME_ATTR_ES3_INIT_STATUS;

	/* Read and clear boot status in ES3_INIT_STATUS */
	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_ATTR_SELECTOR_INDEX, &value);

	if (ret)
		return ret;

	/*
	 * A nonzero boot status indicates the module has finished
	 * booting. Clear it.
	 */
	if (!value) {
		dev_err(&intf->dev, "Module not ready yet\n");
		return -ENODEV;
	}

	/*
	 * Check if the module needs to boot from UniPro.
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 * FIXME: Remove ES2 support from the kernel entirely.
	 */
	if (intf->ddbl1_manufacturer_id == ES2_DDBL1_MFR_ID &&
	    intf->ddbl1_product_id == ES2_DDBL1_PROD_ID)
		init_status = value;
	else
		init_status = value >> 24;

	if (init_status == DME_DIS_UNIPRO_BOOT_STARTED ||
	    init_status == DME_DIS_FALLBACK_UNIPRO_BOOT_STARTED)
		intf->boot_over_unipro = true;

	/* Write zero back to acknowledge/clear the boot status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_ATTR_SELECTOR_INDEX, 0);
}
248
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530249int gb_svc_connection_create(struct gb_svc *svc,
Alex Elder30c6d9d2015-05-22 13:02:08 -0500250 u8 intf1_id, u16 cport1_id,
Viresh Kumar1575ef12015-10-07 15:40:24 -0400251 u8 intf2_id, u16 cport2_id,
252 bool boot_over_unipro)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500253{
254 struct gb_svc_conn_create_request request;
255
256 request.intf1_id = intf1_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100257 request.cport1_id = cpu_to_le16(cport1_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500258 request.intf2_id = intf2_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100259 request.cport2_id = cpu_to_le16(cport2_id);
Perry Hung0b226492015-07-24 19:02:34 -0400260 /*
261 * XXX: fix connections paramaters to TC0 and all CPort flags
262 * for now.
263 */
264 request.tc = 0;
Viresh Kumar1575ef12015-10-07 15:40:24 -0400265
266 /*
267 * We need to skip setting E2EFC and other flags to the connection
268 * create request, for all cports, on an interface that need to boot
269 * over unipro, i.e. interfaces required to download firmware.
270 */
271 if (boot_over_unipro)
272 request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_CSD_N;
273 else
274 request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500275
276 return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
277 &request, sizeof(request), NULL, 0);
278}
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530279EXPORT_SYMBOL_GPL(gb_svc_connection_create);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500280
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530281void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
282 u8 intf2_id, u16 cport2_id)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500283{
284 struct gb_svc_conn_destroy_request request;
Viresh Kumard9fcfff2015-08-31 17:21:05 +0530285 struct gb_connection *connection = svc->connection;
286 int ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500287
288 request.intf1_id = intf1_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100289 request.cport1_id = cpu_to_le16(cport1_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500290 request.intf2_id = intf2_id;
Rui Miguel Silva24980502015-09-15 15:33:51 +0100291 request.cport2_id = cpu_to_le16(cport2_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500292
Viresh Kumard9fcfff2015-08-31 17:21:05 +0530293 ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
294 &request, sizeof(request), NULL, 0);
Johan Hovold684156a2015-11-25 15:59:19 +0100295 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530296 dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100297 intf1_id, cport1_id, intf2_id, cport2_id, ret);
298 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500299}
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530300EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500301
Viresh Kumarbb106852015-09-07 16:01:25 +0530302/* Creates bi-directional routes between the devices */
Viresh Kumar505f16c2015-08-31 17:21:07 +0530303static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
304 u8 intf2_id, u8 dev2_id)
Perry Hunge08aaa42015-07-24 19:02:31 -0400305{
306 struct gb_svc_route_create_request request;
307
308 request.intf1_id = intf1_id;
309 request.dev1_id = dev1_id;
310 request.intf2_id = intf2_id;
311 request.dev2_id = dev2_id;
312
313 return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
314 &request, sizeof(request), NULL, 0);
315}
Perry Hunge08aaa42015-07-24 19:02:31 -0400316
Viresh Kumar0a020572015-09-07 18:05:26 +0530317/* Destroys bi-directional routes between the devices */
318static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
319{
320 struct gb_svc_route_destroy_request request;
321 int ret;
322
323 request.intf1_id = intf1_id;
324 request.intf2_id = intf2_id;
325
326 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
327 &request, sizeof(request), NULL, 0);
Johan Hovold684156a2015-11-25 15:59:19 +0100328 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530329 dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100330 intf1_id, intf2_id, ret);
331 }
Viresh Kumar0a020572015-09-07 18:05:26 +0530332}
333
Laurent Pinchartaab4a1a2016-01-06 16:16:46 +0200334int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
335 u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
336 u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
337 u8 flags, u32 quirks)
Laurent Pinchart784f8762015-12-18 21:23:22 +0200338{
Laurent Pinchartaab4a1a2016-01-06 16:16:46 +0200339 struct gb_svc_intf_set_pwrm_request request;
340 struct gb_svc_intf_set_pwrm_response response;
341 int ret;
Laurent Pinchart784f8762015-12-18 21:23:22 +0200342
343 request.intf_id = intf_id;
Laurent Pinchartaab4a1a2016-01-06 16:16:46 +0200344 request.hs_series = hs_series;
345 request.tx_mode = tx_mode;
346 request.tx_gear = tx_gear;
347 request.tx_nlanes = tx_nlanes;
348 request.rx_mode = rx_mode;
349 request.rx_gear = rx_gear;
350 request.rx_nlanes = rx_nlanes;
Laurent Pinchart784f8762015-12-18 21:23:22 +0200351 request.flags = flags;
Laurent Pinchartaab4a1a2016-01-06 16:16:46 +0200352 request.quirks = cpu_to_le32(quirks);
Laurent Pinchart784f8762015-12-18 21:23:22 +0200353
Laurent Pinchartaab4a1a2016-01-06 16:16:46 +0200354 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
355 &request, sizeof(request),
356 &response, sizeof(response));
357 if (ret < 0)
358 return ret;
359
360 return le16_to_cpu(response.result_code);
Laurent Pinchart784f8762015-12-18 21:23:22 +0200361}
Laurent Pinchartaab4a1a2016-01-06 16:16:46 +0200362EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
Laurent Pinchart784f8762015-12-18 21:23:22 +0200363
/*
 * Handle the incoming SVC protocol-version request: validate the
 * request size, reject unsupported major versions, record the SVC's
 * version on the connection and send our (echoed) version back in the
 * response. Returns 0 on success or a negative errno.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_protocol_version_request *request;
	struct gb_protocol_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	/* Remember the version the SVC advertised. */
	connection->module_major = request->major;
	connection->module_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	/* Echo the accepted version back to the SVC. */
	response = op->response->payload;
	response->major = connection->module_major;
	response->minor = connection->module_minor;

	return 0;
}
398
/*
 * Handle the SVC hello request: record the Endo id and the AP's
 * interface id from the request, then register the svc device with the
 * driver core. Returns 0 on success or a negative errno.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = connection->private;
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	/* The svc device only becomes visible once hello has arrived. */
	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	return 0;
}
425
/*
 * Tear down an interface: mark it disconnected, remove it from the
 * system, destroy its AP route and release its device id.
 */
static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
	u8 intf_id = intf->interface_id;
	u8 device_id = intf->device_id;

	/* Flag the interface as gone before removing it. */
	intf->disconnected = true;

	gb_interface_remove(intf);

	/*
	 * Destroy the two-way route between the AP and the interface.
	 */
	gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

	/* Return the device id to the allocator. */
	ida_simple_remove(&svc->device_id_map, device_id);
}
442
Johan Hovold9ae41092015-12-02 18:23:29 +0100443static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500444{
Johan Hovold24456a092015-12-02 18:23:27 +0100445 struct gb_svc_intf_hotplug_request *request;
Johan Hovold9ae41092015-12-02 18:23:29 +0100446 struct gb_connection *connection = operation->connection;
Viresh Kumar067906f2015-08-06 12:44:55 +0530447 struct gb_svc *svc = connection->private;
Johan Hovold25376362015-11-03 18:03:23 +0100448 struct gb_host_device *hd = connection->hd;
Viresh Kumaread35462015-07-21 17:44:19 +0530449 struct gb_interface *intf;
450 u8 intf_id, device_id;
Viresh Kumaread35462015-07-21 17:44:19 +0530451 int ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500452
Johan Hovold24456a092015-12-02 18:23:27 +0100453 /* The request message size has already been verified. */
Johan Hovold9ae41092015-12-02 18:23:29 +0100454 request = operation->request->payload;
Johan Hovold24456a092015-12-02 18:23:27 +0100455 intf_id = request->intf_id;
456
457 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500458
Viresh Kumarbbaca712015-09-23 16:48:08 -0700459 intf = gb_interface_find(hd, intf_id);
460 if (intf) {
461 /*
462 * We have received a hotplug request for an interface that
463 * already exists.
464 *
465 * This can happen in cases like:
466 * - bootrom loading the firmware image and booting into that,
467 * which only generates a hotplug event. i.e. no hot-unplug
468 * event.
469 * - Or the firmware on the module crashed and sent hotplug
470 * request again to the SVC, which got propagated to AP.
471 *
472 * Remove the interface and add it again, and let user know
473 * about this with a print message.
474 */
Viresh Kumar2f3db922015-12-04 21:30:09 +0530475 dev_info(&svc->dev, "removing interface %u to add it again\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100476 intf_id);
Johan Hovoldb4ee82e2015-12-02 18:23:28 +0100477 gb_svc_intf_remove(svc, intf);
Viresh Kumarbbaca712015-09-23 16:48:08 -0700478 }
479
Viresh Kumaread35462015-07-21 17:44:19 +0530480 intf = gb_interface_create(hd, intf_id);
481 if (!intf) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530482 dev_err(&svc->dev, "failed to create interface %u\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100483 intf_id);
Johan Hovold9ae41092015-12-02 18:23:29 +0100484 return;
Viresh Kumaread35462015-07-21 17:44:19 +0530485 }
486
Viresh Kumar63d742b2015-12-23 09:07:42 +0530487 intf->ddbl1_manufacturer_id = le32_to_cpu(request->data.ddbl1_mfr_id);
488 intf->ddbl1_product_id = le32_to_cpu(request->data.ddbl1_prod_id);
489 intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
490 intf->product_id = le32_to_cpu(request->data.ara_prod_id);
Viresh Kumar57c6bcc2015-12-28 11:59:00 +0530491 intf->serial_number = le64_to_cpu(request->data.serial_number);
Viresh Kumar63d742b2015-12-23 09:07:42 +0530492
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700493 ret = gb_svc_read_and_clear_module_boot_status(intf);
Johan Hovoldb395754a2015-12-07 15:05:28 +0100494 if (ret) {
495 dev_err(&svc->dev, "failed to clear boot status of interface %u: %d\n",
496 intf_id, ret);
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700497 goto destroy_interface;
Johan Hovoldb395754a2015-12-07 15:05:28 +0100498 }
Viresh Kumar6bec5c72015-09-24 14:40:29 -0700499
Viresh Kumaread35462015-07-21 17:44:19 +0530500 /*
501 * Create a device id for the interface:
502 * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
503 * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
504 *
505 * XXX Do we need to allocate device ID for SVC or the AP here? And what
506 * XXX about an AP with multiple interface blocks?
507 */
Johan Hovoldc09db182015-09-15 09:18:08 +0200508 device_id = ida_simple_get(&svc->device_id_map,
Johan Hovold89f637f2015-09-01 12:25:25 +0200509 GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
Viresh Kumaread35462015-07-21 17:44:19 +0530510 if (device_id < 0) {
511 ret = device_id;
Viresh Kumar2f3db922015-12-04 21:30:09 +0530512 dev_err(&svc->dev, "failed to allocate device id for interface %u: %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100513 intf_id, ret);
Viresh Kumaread35462015-07-21 17:44:19 +0530514 goto destroy_interface;
515 }
516
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530517 ret = gb_svc_intf_device_id(svc, intf_id, device_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530518 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530519 dev_err(&svc->dev, "failed to set device id %u for interface %u: %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100520 device_id, intf_id, ret);
Viresh Kumaread35462015-07-21 17:44:19 +0530521 goto ida_put;
522 }
523
Perry Hung7e275462015-07-24 19:02:32 -0400524 /*
525 * Create a two-way route between the AP and the new interface
526 */
Johan Hovold66069fb2015-11-25 15:59:09 +0100527 ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
Viresh Kumar3f0e9182015-08-31 17:21:06 +0530528 intf_id, device_id);
Perry Hung7e275462015-07-24 19:02:32 -0400529 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530530 dev_err(&svc->dev, "failed to create route to interface %u (device id %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100531 intf_id, device_id, ret);
Viresh Kumar0a020572015-09-07 18:05:26 +0530532 goto svc_id_free;
Perry Hung7e275462015-07-24 19:02:32 -0400533 }
534
Viresh Kumaread35462015-07-21 17:44:19 +0530535 ret = gb_interface_init(intf, device_id);
536 if (ret) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530537 dev_err(&svc->dev, "failed to initialize interface %u (device id %u): %d\n",
Johan Hovold684156a2015-11-25 15:59:19 +0100538 intf_id, device_id, ret);
Viresh Kumar0a020572015-09-07 18:05:26 +0530539 goto destroy_route;
Viresh Kumaread35462015-07-21 17:44:19 +0530540 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500541
Johan Hovold9ae41092015-12-02 18:23:29 +0100542 return;
Viresh Kumaread35462015-07-21 17:44:19 +0530543
Viresh Kumar0a020572015-09-07 18:05:26 +0530544destroy_route:
Johan Hovold66069fb2015-11-25 15:59:09 +0100545 gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530546svc_id_free:
547 /*
548 * XXX Should we tell SVC that this id doesn't belong to interface
549 * XXX anymore.
550 */
551ida_put:
Johan Hovoldc09db182015-09-15 09:18:08 +0200552 ida_simple_remove(&svc->device_id_map, device_id);
Viresh Kumaread35462015-07-21 17:44:19 +0530553destroy_interface:
Viresh Kumar80d1ede2015-09-23 16:48:10 -0700554 gb_interface_remove(intf);
Johan Hovold9ae41092015-12-02 18:23:29 +0100555}
556
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100557static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
558{
559 struct gb_svc *svc = operation->connection->private;
560 struct gb_svc_intf_hot_unplug_request *request;
561 struct gb_host_device *hd = operation->connection->hd;
562 struct gb_interface *intf;
563 u8 intf_id;
564
565 /* The request message size has already been verified. */
566 request = operation->request->payload;
567 intf_id = request->intf_id;
568
569 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);
570
571 intf = gb_interface_find(hd, intf_id);
572 if (!intf) {
Viresh Kumar2f3db922015-12-04 21:30:09 +0530573 dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100574 intf_id);
575 return;
576 }
577
578 gb_svc_intf_remove(svc, intf);
579}
580
Johan Hovold9ae41092015-12-02 18:23:29 +0100581static void gb_svc_process_deferred_request(struct work_struct *work)
582{
583 struct gb_svc_deferred_request *dr;
584 struct gb_operation *operation;
585 struct gb_svc *svc;
586 u8 type;
587
588 dr = container_of(work, struct gb_svc_deferred_request, work);
589 operation = dr->operation;
590 svc = operation->connection->private;
591 type = operation->request->header->type;
592
593 switch (type) {
594 case GB_SVC_TYPE_INTF_HOTPLUG:
595 gb_svc_process_intf_hotplug(operation);
596 break;
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100597 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
598 gb_svc_process_intf_hot_unplug(operation);
599 break;
Johan Hovold9ae41092015-12-02 18:23:29 +0100600 default:
Viresh Kumarb933fa42015-12-04 21:30:10 +0530601 dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
Johan Hovold9ae41092015-12-02 18:23:29 +0100602 }
603
604 gb_operation_put(operation);
605 kfree(dr);
606}
607
608static int gb_svc_queue_deferred_request(struct gb_operation *operation)
609{
Johan Hovold3e48aca2015-12-02 18:23:31 +0100610 struct gb_svc *svc = operation->connection->private;
Johan Hovold9ae41092015-12-02 18:23:29 +0100611 struct gb_svc_deferred_request *dr;
612
613 dr = kmalloc(sizeof(*dr), GFP_KERNEL);
614 if (!dr)
615 return -ENOMEM;
616
617 gb_operation_get(operation);
618
619 dr->operation = operation;
620 INIT_WORK(&dr->work, gb_svc_process_deferred_request);
621
Johan Hovold3e48aca2015-12-02 18:23:31 +0100622 queue_work(svc->wq, &dr->work);
Johan Hovold9ae41092015-12-02 18:23:29 +0100623
624 return 0;
Viresh Kumar067906f2015-08-06 12:44:55 +0530625}
Viresh Kumaread35462015-07-21 17:44:19 +0530626
Viresh Kumar067906f2015-08-06 12:44:55 +0530627/*
628 * Bringing up a module can be time consuming, as that may require lots of
629 * initialization on the module side. Over that, we may also need to download
630 * the firmware first and flash that on the module.
631 *
Johan Hovold3e48aca2015-12-02 18:23:31 +0100632 * In order not to make other svc events wait for all this to finish,
Viresh Kumar067906f2015-08-06 12:44:55 +0530633 * handle most of module hotplug stuff outside of the hotplug callback, with
634 * help of a workqueue.
635 */
636static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
637{
Johan Hovold684156a2015-11-25 15:59:19 +0100638 struct gb_svc *svc = op->connection->private;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100639 struct gb_svc_intf_hotplug_request *request;
Viresh Kumar067906f2015-08-06 12:44:55 +0530640
Johan Hovoldd34a3642015-12-02 18:23:26 +0100641 if (op->request->payload_size < sizeof(*request)) {
Johan Hovold684156a2015-11-25 15:59:19 +0100642 dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
Johan Hovoldd34a3642015-12-02 18:23:26 +0100643 op->request->payload_size, sizeof(*request));
Viresh Kumar067906f2015-08-06 12:44:55 +0530644 return -EINVAL;
645 }
646
Johan Hovoldd34a3642015-12-02 18:23:26 +0100647 request = op->request->payload;
648
649 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
650
Johan Hovold9ae41092015-12-02 18:23:29 +0100651 return gb_svc_queue_deferred_request(op);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500652}
653
654static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
655{
Johan Hovold684156a2015-11-25 15:59:19 +0100656 struct gb_svc *svc = op->connection->private;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100657 struct gb_svc_intf_hot_unplug_request *request;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500658
Johan Hovoldd34a3642015-12-02 18:23:26 +0100659 if (op->request->payload_size < sizeof(*request)) {
Johan Hovold684156a2015-11-25 15:59:19 +0100660 dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
Johan Hovoldd34a3642015-12-02 18:23:26 +0100661 op->request->payload_size, sizeof(*request));
Alex Elder30c6d9d2015-05-22 13:02:08 -0500662 return -EINVAL;
663 }
Alex Elder30c6d9d2015-05-22 13:02:08 -0500664
Johan Hovoldd34a3642015-12-02 18:23:26 +0100665 request = op->request->payload;
Johan Hovoldd34a3642015-12-02 18:23:26 +0100666
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100667 dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500668
Johan Hovold57ccd4b2015-12-02 18:23:30 +0100669 return gb_svc_queue_deferred_request(op);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500670}
671
672static int gb_svc_intf_reset_recv(struct gb_operation *op)
673{
Johan Hovold684156a2015-11-25 15:59:19 +0100674 struct gb_svc *svc = op->connection->private;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500675 struct gb_message *request = op->request;
676 struct gb_svc_intf_reset_request *reset;
677 u8 intf_id;
678
679 if (request->payload_size < sizeof(*reset)) {
Johan Hovold684156a2015-11-25 15:59:19 +0100680 dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
681 request->payload_size, sizeof(*reset));
Alex Elder30c6d9d2015-05-22 13:02:08 -0500682 return -EINVAL;
683 }
684 reset = request->payload;
685
686 intf_id = reset->intf_id;
687
688 /* FIXME Reset the interface here */
689
690 return 0;
691}
692
693static int gb_svc_request_recv(u8 type, struct gb_operation *op)
694{
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530695 struct gb_connection *connection = op->connection;
696 struct gb_svc *svc = connection->private;
697 int ret = 0;
698
699 /*
700 * SVC requests need to follow a specific order (at least initially) and
701 * below code takes care of enforcing that. The expected order is:
702 * - PROTOCOL_VERSION
703 * - SVC_HELLO
704 * - Any other request, but the earlier two.
705 *
706 * Incoming requests are guaranteed to be serialized and so we don't
707 * need to protect 'state' for any races.
708 */
Alex Elder30c6d9d2015-05-22 13:02:08 -0500709 switch (type) {
Viresh Kumar0e2462d2015-08-14 07:57:38 +0530710 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530711 if (svc->state != GB_SVC_STATE_RESET)
712 ret = -EINVAL;
713 break;
Viresh Kumaread35462015-07-21 17:44:19 +0530714 case GB_SVC_TYPE_SVC_HELLO:
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530715 if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
716 ret = -EINVAL;
717 break;
718 default:
719 if (svc->state != GB_SVC_STATE_SVC_HELLO)
720 ret = -EINVAL;
721 break;
722 }
723
724 if (ret) {
Johan Hovold684156a2015-11-25 15:59:19 +0100725 dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
726 type, svc->state);
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530727 return ret;
728 }
729
730 switch (type) {
731 case GB_REQUEST_TYPE_PROTOCOL_VERSION:
732 ret = gb_svc_version_request(op);
733 if (!ret)
734 svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
735 return ret;
736 case GB_SVC_TYPE_SVC_HELLO:
737 ret = gb_svc_hello(op);
738 if (!ret)
739 svc->state = GB_SVC_STATE_SVC_HELLO;
740 return ret;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500741 case GB_SVC_TYPE_INTF_HOTPLUG:
742 return gb_svc_intf_hotplug_recv(op);
743 case GB_SVC_TYPE_INTF_HOT_UNPLUG:
744 return gb_svc_intf_hot_unplug_recv(op);
745 case GB_SVC_TYPE_INTF_RESET:
746 return gb_svc_intf_reset_recv(op);
747 default:
Johan Hovold684156a2015-11-25 15:59:19 +0100748 dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
Alex Elder30c6d9d2015-05-22 13:02:08 -0500749 return -EINVAL;
750 }
751}
752
/*
 * Release callback for the SVC device, invoked by the driver core once
 * the last reference is dropped (e.g. via gb_svc_put()).  Frees all
 * resources allocated in gb_svc_create().
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* Connection creation can fail in gb_svc_create(), so it may be NULL. */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}
763
/* Device type for SVC devices; routes device release to gb_svc_release(). */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
768
Johan Hovold7adeaae72015-12-07 15:05:37 +0100769struct gb_svc *gb_svc_create(struct gb_host_device *hd)
Alex Elder30c6d9d2015-05-22 13:02:08 -0500770{
771 struct gb_svc *svc;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500772
773 svc = kzalloc(sizeof(*svc), GFP_KERNEL);
774 if (!svc)
Johan Hovold7adeaae72015-12-07 15:05:37 +0100775 return NULL;
Alex Elder30c6d9d2015-05-22 13:02:08 -0500776
Johan Hovold3e48aca2015-12-02 18:23:31 +0100777 svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
778 if (!svc->wq) {
779 kfree(svc);
Johan Hovold7adeaae72015-12-07 15:05:37 +0100780 return NULL;
Johan Hovold3e48aca2015-12-02 18:23:31 +0100781 }
782
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100783 svc->dev.parent = &hd->dev;
784 svc->dev.bus = &greybus_bus_type;
785 svc->dev.type = &greybus_svc_type;
Johan Hovold66069fb2015-11-25 15:59:09 +0100786 svc->dev.groups = svc_groups;
Johan Hovoldefe6ef72015-11-25 15:59:06 +0100787 svc->dev.dma_mask = svc->dev.parent->dma_mask;
788 device_initialize(&svc->dev);
789
790 dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
791
Johan Hovold6106e512015-11-25 15:59:07 +0100792 ida_init(&svc->device_id_map);
Viresh Kumar3ccb1602015-09-03 15:42:22 +0530793 svc->state = GB_SVC_STATE_RESET;
Johan Hovoldf0960d02015-12-03 19:18:02 +0100794 svc->hd = hd;
Viresh Kumard3d44842015-07-21 17:44:18 +0530795
Johan Hovold7adeaae72015-12-07 15:05:37 +0100796 svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
797 GREYBUS_PROTOCOL_SVC);
798 if (!svc->connection) {
799 dev_err(&svc->dev, "failed to create connection\n");
800 put_device(&svc->dev);
801 return NULL;
802 }
803
804 svc->connection->private = svc;
805
806 return svc;
807}
808
809int gb_svc_add(struct gb_svc *svc)
810{
811 int ret;
812
813 /*
814 * The SVC protocol is currently driven by the SVC, so the SVC device
815 * is added from the connection request handler when enough
816 * information has been received.
817 */
818 ret = gb_connection_init(svc->connection);
819 if (ret)
820 return ret;
821
822 return 0;
823}
824
/*
 * Tear down the SVC: unregister the SVC device (if registered), shut
 * down the SVC connection and drain any pending deferred requests.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev))
		device_del(&svc->dev);

	gb_connection_exit(svc->connection);

	/* Wait for any queued deferred-request work to finish. */
	flush_workqueue(svc->wq);
}
837
/*
 * Drop a reference on the SVC device; the final put triggers
 * gb_svc_release() and frees the structure.
 */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}
842
/*
 * Connection-init hook for the SVC protocol.  Nothing to set up here
 * beyond a debug trace; the SVC side drives the protocol exchange.
 */
static int gb_svc_connection_init(struct gb_connection *connection)
{
	struct gb_svc *svc = connection->private;

	dev_dbg(&svc->dev, "%s\n", __func__);

	return 0;
}
851
/* Connection-exit hook for the SVC protocol; debug trace only. */
static void gb_svc_connection_exit(struct gb_connection *connection)
{
	struct gb_svc *svc = connection->private;

	dev_dbg(&svc->dev, "%s\n", __func__);
}
858
/*
 * SVC protocol descriptor.  The flags skip the control-connection
 * connected/disconnected notifications and the version handshake
 * (the version exchange is handled via gb_svc_version_request() in
 * the request handler instead).
 */
static struct gb_protocol svc_protocol = {
	.name			= "svc",
	.id			= GREYBUS_PROTOCOL_SVC,
	.major			= GB_SVC_VERSION_MAJOR,
	.minor			= GB_SVC_VERSION_MINOR,
	.connection_init	= gb_svc_connection_init,
	.connection_exit	= gb_svc_connection_exit,
	.request_recv		= gb_svc_request_recv,
	.flags			= GB_PROTOCOL_SKIP_CONTROL_CONNECTED |
				  GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED |
				  GB_PROTOCOL_SKIP_VERSION,
};
gb_builtin_protocol_driver(svc_protocol);