blob: 4e28d34aa47c35bf5f5f73bcc857ad998d3fa87a [file] [log] [blame]
Chiranjeevi Velempatie130fd02011-11-29 05:06:13 +05301/*
2 * f_ccid.c -- CCID function Driver
3 *
Duy Truong790f06d2013-02-13 16:38:12 -08004 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
Chiranjeevi Velempatie130fd02011-11-29 05:06:13 +05305
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details
14 */
15
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/device.h>
19#include <linux/usb/android_composite.h>
20#include <linux/fs.h>
21#include <linux/usb/ccid_desc.h>
22#include <linux/miscdevice.h>
23
24#include "f_ccid.h"
25
26#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
27#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header)
28#define CTRL_BUF_SIZE 4
29#define FUNCTION_NAME "ccid"
30#define CCID_NOTIFY_INTERVAL 5
31#define CCID_NOTIFY_MAXPACKET 4
32
33/* number of tx requests to allocate */
34#define TX_REQ_MAX 4
35
/*
 * State for the /dev/ccid_ctrl misc device: relays class control requests
 * (e.g. ABORT) received on ep0 to the userspace CCID daemon.
 */
struct ccid_ctrl_dev {
	atomic_t opened;		/* single-open guard */
	struct list_head tx_q;		/* initialized but unused queue */
	wait_queue_head_t tx_wait_q;	/* readers sleep here for a ctrl req */
	unsigned char buf[CTRL_BUF_SIZE]; /* last control request, packed */
	int tx_ctrl_done;		/* flag: buf holds a fresh request */
};
43
/*
 * State for the /dev/ccid_bulk misc device: shuttles CCID bulk messages
 * between the host (USB bulk IN/OUT endpoints) and userspace.
 */
struct ccid_bulk_dev {
	atomic_t error;			/* latched transfer error, cleared on open */
	atomic_t opened;		/* single-open guard */
	atomic_t rx_req_busy;		/* rx_req buffer is being copied to user */
	wait_queue_head_t read_wq;	/* readers wait for OUT completion */
	wait_queue_head_t write_wq;	/* writers wait for an idle IN request */
	struct usb_request *rx_req;	/* the single bulk-OUT request */
	int rx_done;			/* set by OUT completion handler */
	struct list_head tx_idle;	/* pool of idle bulk-IN requests */
};
54
/* Per-function state for the CCID gadget function (singleton, see _ccid_dev). */
struct f_ccid {
	struct usb_function function;	/* composite-framework hookup */
	struct usb_composite_dev *cdev;
	int ifc_id;			/* interface number from usb_interface_id() */
	spinlock_t lock;		/* protects req lists and online transitions */
	atomic_t online;		/* set in set_alt(), cleared in disable() */
	/* usb eps*/
	struct usb_ep *notify;		/* interrupt IN (card/slot notifications) */
	struct usb_ep *in;		/* bulk IN */
	struct usb_ep *out;		/* bulk OUT */
	struct usb_request *notify_req;	/* request used by CCID_NOTIFY_* ioctls */
	struct ccid_ctrl_dev ctrl_dev;
	struct ccid_bulk_dev bulk_dev;
	int dtr_state;			/* 1 while configured; read via CCID_READ_DTR */
};
70
/* Singleton function instance, allocated in ccid_setup(). */
static struct f_ccid *_ccid_dev;
/* Forward declarations for the misc char devices defined further below. */
static struct miscdevice ccid_bulk_device;
static struct miscdevice ccid_ctrl_device;
74
/* Interface Descriptor: one CCID interface with notify + bulk IN/OUT. */
static struct usb_interface_descriptor ccid_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_CSCID,	/* smart card class */
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	0,
	/* .bInterfaceNumber is patched in ccid_function_bind() */
};
/*
 * CCID Class Descriptor (CCID rev 1.1, section 5.1): advertises a single
 * slot, fixed 3.58 MHz clock / 9600 bps, T=0 protocol, and extended-APDU
 * exchange with all "automatic" features so the host never issues the
 * corresponding manual requests.
 */
static struct usb_ccid_class_descriptor ccid_class_desc = {
	.bLength =		sizeof(ccid_class_desc),
	.bDescriptorType =	CCID_DECRIPTOR_TYPE,
	.bcdCCID =		CCID1_10,
	.bMaxSlotIndex =	0,	/* single slot (index 0) */
	/* This value indicates what voltages the CCID can supply to slots */
	.bVoltageSupport =	VOLTS_3_0,
	.dwProtocols =		PROTOCOL_TO,
	/* Default ICC clock frequency in KHz */
	.dwDefaultClock =	3580,
	/* Maximum supported ICC clock frequency in KHz */
	.dwMaximumClock =	3580,
	.bNumClockSupported =	0,	/* 0 => only default/max clock */
	/* Default ICC I/O data rate in bps */
	.dwDataRate =		9600,
	/* Maximum supported ICC I/O data rate in bps */
	.dwMaxDataRate =	9600,
	.bNumDataRatesSupported = 0,	/* 0 => only default/max rate */
	.dwMaxIFSD =		0,
	.dwSynchProtocols =	0,
	.dwMechanical =		0,	/* no mechanical card handling */
	/* This value indicates what intelligent features the CCID has */
	.dwFeatures =		CCID_FEATURES_EXC_SAPDU |
				CCID_FEATURES_AUTO_PNEGO |
				CCID_FEATURES_AUTO_BAUD |
				CCID_FEATURES_AUTO_CLOCK |
				CCID_FEATURES_AUTO_VOLT |
				CCID_FEATURES_AUTO_ACTIV |
				CCID_FEATURES_AUTO_PCONF,
	/* extended APDU level Message Length */
	.dwMaxCCIDMessageLength = 0x200,
	.bClassGetResponse =	0x0,
	.bClassEnvelope =	0x0,
	.wLcdLayout =		0,	/* no LCD */
	.bPINSupport =		0,	/* no PIN pad */
	.bMaxCCIDBusySlots =	1
};
/* Full speed support: 64-byte bulk packets, interrupt polled every 2^4 ms. */
static struct usb_endpoint_descriptor ccid_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	/* FS bInterval is in frames (ms) */
	.bInterval =		1 << CCID_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor ccid_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor ccid_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

/* Full-speed descriptor set handed to the composite framework. */
static struct usb_descriptor_header *ccid_fs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_fs_notify_desc,
	(struct usb_descriptor_header *) &ccid_fs_in_desc,
	(struct usb_descriptor_header *) &ccid_fs_out_desc,
	NULL,
};
156
/* High speed support: 512-byte bulk packets; HS bInterval is 2^(n-1) uframes. */
static struct usb_endpoint_descriptor ccid_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	.bInterval =		CCID_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor ccid_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor ccid_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

/* High-speed descriptor set; endpoint addresses copied from FS at bind. */
static struct usb_descriptor_header *ccid_hs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_hs_notify_desc,
	(struct usb_descriptor_header *) &ccid_hs_in_desc,
	(struct usb_descriptor_header *) &ccid_hs_out_desc,
	NULL,
};
191
/* Map the embedded usb_function back to its containing f_ccid. */
static inline struct f_ccid *func_to_ccid(struct usb_function *f)
{
	return container_of(f, struct f_ccid, function);
}
196
197static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head,
198 struct usb_request *req)
199{
200 unsigned long flags;
201
202 spin_lock_irqsave(&ccid_dev->lock, flags);
203 list_add_tail(&req->list, head);
204 spin_unlock_irqrestore(&ccid_dev->lock, flags);
205}
206
207static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev,
208 struct list_head *head)
209{
210 unsigned long flags;
211 struct usb_request *req = NULL;
212
213 spin_lock_irqsave(&ccid_dev->lock, flags);
214 if (!list_empty(head)) {
215 req = list_first_entry(head, struct usb_request, list);
216 list_del(&req->list);
217 }
218 spin_unlock_irqrestore(&ccid_dev->lock, flags);
219 return req;
220}
221
222static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req)
223{
224 switch (req->status) {
225 case -ECONNRESET:
226 case -ESHUTDOWN:
227 case 0:
228 break;
229 default:
230 pr_err("CCID notify ep error %d\n", req->status);
231 }
232}
233
234static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req)
235{
236 struct f_ccid *ccid_dev = _ccid_dev;
237 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
238
239 if (req->status != 0)
240 atomic_set(&bulk_dev->error, 1);
241
242 ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
243 wake_up(&bulk_dev->write_wq);
244}
245
246static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req)
247{
248 struct f_ccid *ccid_dev = _ccid_dev;
249 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
250 if (req->status != 0)
251 atomic_set(&bulk_dev->error, 1);
252
253 bulk_dev->rx_done = 1;
254 wake_up(&bulk_dev->read_wq);
255}
256
257static struct usb_request *
258ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
259{
260 struct usb_request *req;
261
262 req = usb_ep_alloc_request(ep, kmalloc_flags);
263
264 if (req != NULL) {
265 req->length = len;
266 req->buf = kmalloc(len, kmalloc_flags);
267 if (req->buf == NULL) {
268 usb_ep_free_request(ep, req);
269 req = NULL;
270 }
271 }
272
273 return req ? req : ERR_PTR(-ENOMEM);
274}
275
276static void ccid_request_free(struct usb_request *req, struct usb_ep *ep)
277{
278 if (req) {
279 kfree(req->buf);
280 usb_ep_free_request(ep, req);
281 }
282}
283
/*
 * Class-specific ep0 request handler.
 *
 * Handles the three CCID class requests: ABORT is packed into ctrl_dev->buf
 * and handed to the userspace daemon via /dev/ccid_ctrl; the two GET_*
 * requests are answered directly from the class descriptor on ep0.
 * Returns >= 0 (bytes queued on ep0) on success, negative errno otherwise.
 */
static int
ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function);
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (!atomic_read(&ccid_dev->online))
		return -ENOTCONN;

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_ABORT:
		if (w_length != 0)
			goto invalid;
		/* Pack request + slot/seq (from wValue) for the ctrl reader. */
		ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT;
		ctrl_dev->buf[1] = w_value & 0xFF;
		ctrl_dev->buf[2] = (w_value >> 8) & 0xFF;
		ctrl_dev->buf[3] = 0x00;
		ctrl_dev->tx_ctrl_done = 1;
		wake_up(&ctrl_dev->tx_wait_q);
		return 0;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_CLOCK_FREQUENCIES:
		if (w_length > req->length)
			goto invalid;
		*(u32 *) req->buf =
				cpu_to_le32(ccid_class_desc.dwDefaultClock);
		/* Respond with at most the 4-byte clock value. */
		ret = min_t(u32, w_length,
				sizeof(ccid_class_desc.dwDefaultClock));
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_DATA_RATES:
		if (w_length > req->length)
			goto invalid;
		*(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate);
		ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate));
		break;

	default:
invalid:
		pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			pr_err("ccid ep0 enqueue err %d\n", ret);
	}

	return ret;
}
351
/*
 * Called by the composite framework on disconnect/reset: quiesce the
 * endpoints, free the per-connection requests allocated in set_alt(),
 * mark the function offline, and wake every sleeper so blocked
 * read/write calls can fail out with -ENODEV.
 */
static void ccid_function_disable(struct usb_function *f)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	struct usb_request *req;

	/* Disable endpoints */
	usb_ep_disable(ccid_dev->notify);
	usb_ep_disable(ccid_dev->in);
	usb_ep_disable(ccid_dev->out);
	/* Free endpoint related requests */
	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
	/*
	 * If a reader is mid-copy of rx_req, it frees the request itself
	 * once it notices online == 0 (see ccid_bulk_read()).
	 */
	if (!atomic_read(&bulk_dev->rx_req_busy))
		ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
		ccid_request_free(req, ccid_dev->in);

	ccid_dev->dtr_state = 0;
	atomic_set(&ccid_dev->online, 0);
	/* Wake up threads */
	wake_up(&bulk_dev->write_wq);
	wake_up(&bulk_dev->read_wq);
	wake_up(&ctrl_dev->tx_wait_q);

}
378
/*
 * Called when the host selects our interface: allocate the notify and
 * bulk requests, then configure and enable the three endpoints in order
 * (notify, IN, OUT).  On any failure the labels below unwind cumulatively
 * in reverse order.  Marks the function online on success.
 */
static int
ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct usb_composite_dev *cdev = ccid_dev->cdev;
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct usb_request *req;
	int ret = 0;
	int i;

	/* Request used by the CCID_NOTIFY_* ioctls on the interrupt ep. */
	ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
			sizeof(struct usb_ccid_notification), GFP_ATOMIC);
	if (IS_ERR(ccid_dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		return PTR_ERR(ccid_dev->notify_req);
	}
	ccid_dev->notify_req->complete = ccid_notify_complete;
	ccid_dev->notify_req->context = ccid_dev;

	/* now allocate requests for our endpoints */
	req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE,
			GFP_ATOMIC);
	if (IS_ERR(req)) {
		pr_err("%s: unable to allocate memory for out req\n",
				__func__);
		ret = PTR_ERR(req);
		goto free_notify;
	}
	req->complete = ccid_bulk_complete_out;
	req->context = ccid_dev;
	bulk_dev->rx_req = req;

	/* Pool of TX_REQ_MAX bulk-IN requests for ccid_bulk_write(). */
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE,
				GFP_ATOMIC);
		if (IS_ERR(req)) {
			pr_err("%s: unable to allocate memory for in req\n",
					__func__);
			ret = PTR_ERR(req);
			goto free_bulk_out;
		}
		req->complete = ccid_bulk_complete_in;
		req->context = ccid_dev;
		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
	}

	/* choose the descriptors and enable endpoints */
	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->notify);
	if (ret) {
		ccid_dev->notify->desc = NULL;
		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
				__func__, ccid_dev->notify->name, ret);
		goto free_bulk_in;
	}
	ret = usb_ep_enable(ccid_dev->notify);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->notify->name, ret);
		goto free_bulk_in;
	}
	ccid_dev->notify->driver_data = ccid_dev;

	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->in);
	if (ret) {
		ccid_dev->in->desc = NULL;
		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
				__func__, ccid_dev->in->name, ret);
		goto disable_ep_notify;
	}
	ret = usb_ep_enable(ccid_dev->in);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->in->name, ret);
		goto disable_ep_notify;
	}

	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->out);
	if (ret) {
		ccid_dev->out->desc = NULL;
		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
				__func__, ccid_dev->out->name, ret);
		goto disable_ep_in;
	}
	ret = usb_ep_enable(ccid_dev->out);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->out->name, ret);
		goto disable_ep_in;
	}
	ccid_dev->dtr_state = 1;
	atomic_set(&ccid_dev->online, 1);
	return ret;

	/* Cumulative unwind: each label falls through to the next. */
disable_ep_in:
	usb_ep_disable(ccid_dev->in);
disable_ep_notify:
	usb_ep_disable(ccid_dev->notify);
	ccid_dev->notify->driver_data = NULL;
free_bulk_in:
	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
		ccid_request_free(req, ccid_dev->in);
free_bulk_out:
	ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
free_notify:
	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
	return ret;
}
487
488static void ccid_function_unbind(struct usb_configuration *c,
489 struct usb_function *f)
490{
491 if (gadget_is_dualspeed(c->cdev->gadget))
492 usb_free_descriptors(f->hs_descriptors);
493 usb_free_descriptors(f->descriptors);
494
495}
496
497static int ccid_function_bind(struct usb_configuration *c,
498 struct usb_function *f)
499{
500 struct f_ccid *ccid_dev = func_to_ccid(f);
501 struct usb_ep *ep;
502 struct usb_composite_dev *cdev = c->cdev;
503 int ret = -ENODEV;
504
505 ccid_dev->ifc_id = usb_interface_id(c, f);
506 if (ccid_dev->ifc_id < 0) {
507 pr_err("%s: unable to allocate ifc id, err:%d",
508 __func__, ccid_dev->ifc_id);
509 return ccid_dev->ifc_id;
510 }
511 ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id;
512
513 ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc);
514 if (!ep) {
515 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
516 return -ENODEV;
517 }
518 ccid_dev->notify = ep;
519 ep->driver_data = cdev;
520
521 ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc);
522 if (!ep) {
523 pr_err("%s: usb epin autoconfig failed\n", __func__);
524 ret = -ENODEV;
525 goto ep_auto_in_fail;
526 }
527 ccid_dev->in = ep;
528 ep->driver_data = cdev;
529
530 ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc);
531 if (!ep) {
532 pr_err("%s: usb epout autoconfig failed\n", __func__);
533 ret = -ENODEV;
534 goto ep_auto_out_fail;
535 }
536 ccid_dev->out = ep;
537 ep->driver_data = cdev;
538
539 f->descriptors = usb_copy_descriptors(ccid_fs_descs);
540 if (!f->descriptors)
541 goto ep_auto_out_fail;
542
Chiranjeevi Velempatie130fd02011-11-29 05:06:13 +0530543 if (gadget_is_dualspeed(cdev->gadget)) {
544 ccid_hs_in_desc.bEndpointAddress =
545 ccid_fs_in_desc.bEndpointAddress;
546 ccid_hs_out_desc.bEndpointAddress =
547 ccid_fs_out_desc.bEndpointAddress;
548 ccid_hs_notify_desc.bEndpointAddress =
549 ccid_fs_notify_desc.bEndpointAddress;
550
551 /* copy descriptors, and track endpoint copies */
552 f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs);
553 if (!f->hs_descriptors)
554 goto ep_auto_out_fail;
Chiranjeevi Velempatie130fd02011-11-29 05:06:13 +0530555 }
556
557 pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
558 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
559 ccid_dev->in->name, ccid_dev->out->name);
560
561 return 0;
562
563ep_auto_out_fail:
564 ccid_dev->out->driver_data = NULL;
565 ccid_dev->out = NULL;
566ep_auto_in_fail:
567 ccid_dev->in->driver_data = NULL;
568 ccid_dev->in = NULL;
569
570 return ret;
571}
572
573static int ccid_bulk_open(struct inode *ip, struct file *fp)
574{
575 struct f_ccid *ccid_dev = _ccid_dev;
576 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
577 unsigned long flags;
578
579 pr_debug("ccid_bulk_open\n");
580 if (!atomic_read(&ccid_dev->online)) {
581 pr_debug("%s: USB cable not connected\n", __func__);
582 return -ENODEV;
583 }
584
585 if (atomic_read(&bulk_dev->opened)) {
586 pr_debug("%s: bulk device is already opened\n", __func__);
587 return -EBUSY;
588 }
589 atomic_set(&bulk_dev->opened, 1);
590 /* clear the error latch */
591 atomic_set(&bulk_dev->error, 0);
592 spin_lock_irqsave(&ccid_dev->lock, flags);
593 fp->private_data = ccid_dev;
594 spin_unlock_irqrestore(&ccid_dev->lock, flags);
595
596 return 0;
597}
598
599static int ccid_bulk_release(struct inode *ip, struct file *fp)
600{
601 struct f_ccid *ccid_dev = fp->private_data;
602 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
603
604 pr_debug("ccid_bulk_release\n");
605 atomic_set(&bulk_dev->opened, 0);
606 return 0;
607}
608
609static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
610 size_t count, loff_t *pos)
611{
612 struct f_ccid *ccid_dev = fp->private_data;
613 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
614 struct usb_request *req;
615 int r = count, xfer;
616 int ret;
617 unsigned long flags;
618
619 pr_debug("ccid_bulk_read(%d)\n", count);
620
621 if (count > BULK_OUT_BUFFER_SIZE) {
622 pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n",
623 __func__, BULK_OUT_BUFFER_SIZE, count);
624 return -ENOMEM;
625 }
626
627 if (atomic_read(&bulk_dev->error)) {
628 r = -EIO;
629 pr_err("%s bulk_dev_error\n", __func__);
630 goto done;
631 }
632
633requeue_req:
634 spin_lock_irqsave(&ccid_dev->lock, flags);
635 if (!atomic_read(&ccid_dev->online)) {
636 pr_debug("%s: USB cable not connected\n", __func__);
637 return -ENODEV;
638 }
639 /* queue a request */
640 req = bulk_dev->rx_req;
641 req->length = count;
642 bulk_dev->rx_done = 0;
643 spin_unlock_irqrestore(&ccid_dev->lock, flags);
644 ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
645 if (ret < 0) {
646 r = -EIO;
647 pr_err("%s usb ep queue failed\n", __func__);
648 atomic_set(&bulk_dev->error, 1);
649 goto done;
650 }
651 /* wait for a request to complete */
652 ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done ||
653 atomic_read(&bulk_dev->error) ||
654 !atomic_read(&ccid_dev->online));
655 if (ret < 0) {
656 atomic_set(&bulk_dev->error, 1);
657 r = ret;
658 usb_ep_dequeue(ccid_dev->out, req);
659 goto done;
660 }
661 if (!atomic_read(&bulk_dev->error)) {
662 spin_lock_irqsave(&ccid_dev->lock, flags);
663 if (!atomic_read(&ccid_dev->online)) {
664 spin_unlock_irqrestore(&ccid_dev->lock, flags);
665 pr_debug("%s: USB cable not connected\n", __func__);
666 r = -ENODEV;
667 goto done;
668 }
669 /* If we got a 0-len packet, throw it back and try again. */
670 if (req->actual == 0) {
671 spin_unlock_irqrestore(&ccid_dev->lock, flags);
672 goto requeue_req;
673 }
674 xfer = (req->actual < count) ? req->actual : count;
675 atomic_set(&bulk_dev->rx_req_busy, 1);
676 spin_unlock_irqrestore(&ccid_dev->lock, flags);
677
678 if (copy_to_user(buf, req->buf, xfer))
679 r = -EFAULT;
680
681 spin_lock_irqsave(&ccid_dev->lock, flags);
682 atomic_set(&bulk_dev->rx_req_busy, 0);
683 if (!atomic_read(&ccid_dev->online)) {
684 ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
685 spin_unlock_irqrestore(&ccid_dev->lock, flags);
686 pr_debug("%s: USB cable not connected\n", __func__);
687 r = -ENODEV;
688 goto done;
689 }
690 spin_unlock_irqrestore(&ccid_dev->lock, flags);
691 } else {
692 r = -EIO;
693 }
694done:
695 pr_debug("ccid_bulk_read returning %d\n", r);
696 return r;
697}
698
699static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf,
700 size_t count, loff_t *pos)
701{
702 struct f_ccid *ccid_dev = fp->private_data;
703 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
704 struct usb_request *req = 0;
705 int r = count;
706 int ret;
707 unsigned long flags;
708
709 pr_debug("ccid_bulk_write(%d)\n", count);
710
711 if (!atomic_read(&ccid_dev->online)) {
712 pr_debug("%s: USB cable not connected\n", __func__);
713 return -ENODEV;
714 }
715
716 if (!count) {
717 pr_err("%s: zero length ctrl pkt\n", __func__);
718 return -ENODEV;
719 }
720 if (count > BULK_IN_BUFFER_SIZE) {
721 pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n",
722 __func__, BULK_IN_BUFFER_SIZE, count);
723 return -ENOMEM;
724 }
725
726
727 /* get an idle tx request to use */
728 ret = wait_event_interruptible(bulk_dev->write_wq,
729 ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) ||
730 atomic_read(&bulk_dev->error)));
731
732 if (ret < 0) {
733 r = ret;
734 goto done;
735 }
736
737 if (atomic_read(&bulk_dev->error)) {
738 pr_err(" %s dev->error\n", __func__);
739 r = -EIO;
740 goto done;
741 }
742 if (copy_from_user(req->buf, buf, count)) {
743 if (!atomic_read(&ccid_dev->online)) {
744 pr_debug("%s: USB cable not connected\n",
745 __func__);
746 ccid_request_free(req, ccid_dev->in);
747 r = -ENODEV;
748 } else {
749 ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
750 r = -EFAULT;
751 }
752 goto done;
753 }
754 req->length = count;
755 ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL);
756 if (ret < 0) {
757 pr_debug("ccid_bulk_write: xfer error %d\n", ret);
758 atomic_set(&bulk_dev->error, 1);
759 ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
760 r = -EIO;
761 spin_lock_irqsave(&ccid_dev->lock, flags);
762 if (!atomic_read(&ccid_dev->online)) {
763 spin_unlock_irqrestore(&ccid_dev->lock, flags);
764 pr_debug("%s: USB cable not connected\n",
765 __func__);
766 while ((req = ccid_req_get(ccid_dev,
767 &bulk_dev->tx_idle)))
768 ccid_request_free(req, ccid_dev->in);
769 r = -ENODEV;
770 }
771 spin_unlock_irqrestore(&ccid_dev->lock, flags);
772 goto done;
773 }
774done:
775 pr_debug("ccid_bulk_write returning %d\n", r);
776 return r;
777}
778
/* file_operations for the bulk transport char device. */
static const struct file_operations ccid_bulk_fops = {
	.owner = THIS_MODULE,
	.read = ccid_bulk_read,
	.write = ccid_bulk_write,
	.open = ccid_bulk_open,
	.release = ccid_bulk_release,
};

/* /dev/ccid_bulk, registered in ccid_bulk_device_init(). */
static struct miscdevice ccid_bulk_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ccid_bulk",
	.fops = &ccid_bulk_fops,
};
792
793static int ccid_bulk_device_init(struct f_ccid *dev)
794{
795 int ret;
796 struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev;
797
798 init_waitqueue_head(&bulk_dev->read_wq);
799 init_waitqueue_head(&bulk_dev->write_wq);
800 INIT_LIST_HEAD(&bulk_dev->tx_idle);
801
802 ret = misc_register(&ccid_bulk_device);
803 if (ret) {
804 pr_err("%s: failed to register misc device\n", __func__);
805 return ret;
806 }
807
808 return 0;
809}
810
811static int ccid_ctrl_open(struct inode *inode, struct file *fp)
812{
813 struct f_ccid *ccid_dev = _ccid_dev;
814 struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
815 unsigned long flags;
816
817 if (!atomic_read(&ccid_dev->online)) {
818 pr_debug("%s: USB cable not connected\n", __func__);
819 return -ENODEV;
820 }
821 if (atomic_read(&ctrl_dev->opened)) {
822 pr_debug("%s: ctrl device is already opened\n", __func__);
823 return -EBUSY;
824 }
825 atomic_set(&ctrl_dev->opened, 1);
826 spin_lock_irqsave(&ccid_dev->lock, flags);
827 fp->private_data = ccid_dev;
828 spin_unlock_irqrestore(&ccid_dev->lock, flags);
829
830 return 0;
831}
832
833
834static int ccid_ctrl_release(struct inode *inode, struct file *fp)
835{
836 struct f_ccid *ccid_dev = fp->private_data;
837 struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
838
839 atomic_set(&ctrl_dev->opened, 0);
840
841 return 0;
842}
843
/*
 * read() for /dev/ccid_ctrl: block until ccid_function_setup() packs a
 * class control request (e.g. ABORT) into ctrl_dev->buf, then hand up to
 * CTRL_BUF_SIZE bytes of it to userspace.  Returns bytes copied or a
 * negative errno (-ENODEV if the cable drops while waiting).
 */
static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
		      size_t count, loff_t *ppos)
{
	struct f_ccid *ccid_dev = fp->private_data;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	int ret = 0;

	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	/* clamp to the fixed-size control buffer */
	if (count > CTRL_BUF_SIZE)
		count = CTRL_BUF_SIZE;

	ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
					 ctrl_dev->tx_ctrl_done);
	if (ret < 0)
		return ret;
	/* consume the pending request before re-checking online state */
	ctrl_dev->tx_ctrl_done = 0;

	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	ret = copy_to_user(buf, ctrl_dev->buf, count);
	if (ret)
		return -EFAULT;

	return count;
}
874
875static long
876ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg)
877{
878 struct f_ccid *ccid_dev = fp->private_data;
879 struct usb_request *req = ccid_dev->notify_req;
880 struct usb_ccid_notification *ccid_notify = req->buf;
881 void __user *argp = (void __user *)arg;
882 int ret = 0;
883
884 switch (cmd) {
885 case CCID_NOTIFY_CARD:
886 if (copy_from_user(ccid_notify, argp,
887 sizeof(struct usb_ccid_notification)))
888 return -EFAULT;
889 req->length = 2;
890 break;
891 case CCID_NOTIFY_HWERROR:
892 if (copy_from_user(ccid_notify, argp,
893 sizeof(struct usb_ccid_notification)))
894 return -EFAULT;
895 req->length = 4;
896 break;
897 case CCID_READ_DTR:
898 if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int)))
899 return -EFAULT;
900 return 0;
901 }
902 ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL);
903 if (ret < 0) {
904 pr_err("ccid notify ep enqueue error %d\n", ret);
905 return ret;
906 }
907 return 0;
908}
909
/* file_operations for the control-path char device. */
static const struct file_operations ccid_ctrl_fops = {
	.owner		= THIS_MODULE,
	.open		= ccid_ctrl_open,
	.release	= ccid_ctrl_release,
	.read		= ccid_ctrl_read,
	.unlocked_ioctl	= ccid_ctrl_ioctl,
};

/* /dev/ccid_ctrl, registered in ccid_ctrl_device_init(). */
static struct miscdevice ccid_ctrl_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ccid_ctrl",
	.fops = &ccid_ctrl_fops,
};
923
924static int ccid_ctrl_device_init(struct f_ccid *dev)
925{
926 int ret;
927 struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
928
929 INIT_LIST_HEAD(&ctrl_dev->tx_q);
930 init_waitqueue_head(&ctrl_dev->tx_wait_q);
931
932 ret = misc_register(&ccid_ctrl_device);
933 if (ret) {
934 pr_err("%s: failed to register misc device\n", __func__);
935 return ret;
936 }
937
938 return 0;
939}
940
/*
 * Add the CCID function to configuration @c, wiring up the composite
 * framework callbacks.  The descriptor pointers set here are replaced by
 * per-gadget copies in ccid_function_bind().
 */
static int ccid_bind_config(struct usb_configuration *c)
{
	struct f_ccid *ccid_dev = _ccid_dev;

	pr_debug("ccid_bind_config\n");
	ccid_dev->cdev = c->cdev;
	ccid_dev->function.name = FUNCTION_NAME;
	ccid_dev->function.descriptors = ccid_fs_descs;
	ccid_dev->function.hs_descriptors = ccid_hs_descs;
	ccid_dev->function.bind = ccid_function_bind;
	ccid_dev->function.unbind = ccid_function_unbind;
	ccid_dev->function.set_alt = ccid_function_set_alt;
	ccid_dev->function.setup = ccid_function_setup;
	ccid_dev->function.disable = ccid_function_disable;

	return usb_add_function(c, &ccid_dev->function);

}
959
/*
 * Module-level setup: allocate the singleton f_ccid and register both
 * misc devices.  On failure everything registered so far is torn down
 * (goto-based cleanup in reverse order).  Returns 0 or negative errno.
 */
static int ccid_setup(void)
{
	struct f_ccid *ccid_dev;
	int ret;

	ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL);
	if (!ccid_dev)
		return -ENOMEM;

	_ccid_dev = ccid_dev;
	spin_lock_init(&ccid_dev->lock);

	ret = ccid_ctrl_device_init(ccid_dev);
	if (ret) {
		pr_err("%s: ccid_ctrl_device_init failed, err:%d\n",
				__func__, ret);
		goto err_ctrl_init;
	}
	ret = ccid_bulk_device_init(ccid_dev);
	if (ret) {
		pr_err("%s: ccid_bulk_device_init failed, err:%d\n",
				__func__, ret);
		goto err_bulk_init;
	}

	return 0;
err_bulk_init:
	misc_deregister(&ccid_ctrl_device);
err_ctrl_init:
	kfree(ccid_dev);
	pr_err("ccid gadget driver failed to initialize\n");
	return ret;
}
993
994static void ccid_cleanup(void)
995{
996 misc_deregister(&ccid_bulk_device);
997 misc_deregister(&ccid_ctrl_device);
998 kfree(_ccid_dev);
999}