blob: b72e85443b6a4ca550e3bc28ab785c6d18d85714 [file] [log] [blame]
/*
 * f_ccid.c -- CCID function Driver
 *
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details
 */
15
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/device.h>
19#include <linux/usb/android_composite.h>
20#include <linux/fs.h>
21#include <linux/usb/ccid_desc.h>
22#include <linux/miscdevice.h>
23
24#include "f_ccid.h"
25
26#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
27#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header)
28#define CTRL_BUF_SIZE 4
29#define FUNCTION_NAME "ccid"
30#define CCID_NOTIFY_INTERVAL 5
31#define CCID_NOTIFY_MAXPACKET 4
32
33/* number of tx requests to allocate */
34#define TX_REQ_MAX 4
35
/*
 * Per-speed set of endpoint descriptor pointers, filled in at bind time
 * (usb_find_endpoint) and consulted in set_alt() via ep_choose().
 */
struct ccid_descs {
	struct usb_endpoint_descriptor *in;	/* bulk IN */
	struct usb_endpoint_descriptor *out;	/* bulk OUT */
	struct usb_endpoint_descriptor *notify;	/* interrupt IN */
};
41
/*
 * State backing the /dev/ccid_ctrl misc device: EP0 class requests
 * (e.g. ABORT) are staged in buf[] and handed to the userspace reader.
 */
struct ccid_ctrl_dev {
	atomic_t opened;		/* single-open guard */
	struct list_head tx_q;		/* initialized but unused here */
	wait_queue_head_t tx_wait_q;	/* reader sleeps here for tx_ctrl_done */
	unsigned char buf[CTRL_BUF_SIZE];	/* staged control message */
	int tx_ctrl_done;		/* set by setup(), cleared by reader */
};
49
/*
 * State backing the /dev/ccid_bulk misc device (bulk IN/OUT data path).
 */
struct ccid_bulk_dev {
	atomic_t error;			/* latched I/O error flag */
	atomic_t opened;		/* single-open guard */
	atomic_t rx_req_busy;		/* rx_req handed to copy_to_user() */
	wait_queue_head_t read_wq;	/* reader waits for rx_done/error */
	wait_queue_head_t write_wq;	/* writer waits for an idle tx req */
	struct usb_request *rx_req;	/* single reusable OUT request */
	int rx_done;			/* set by OUT completion handler */
	struct list_head tx_idle;	/* pool of idle IN requests */
};
60
/*
 * Top-level CCID function instance; a single static instance (_ccid_dev)
 * is allocated in ccid_setup().
 */
struct f_ccid {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	int ifc_id;			/* interface number from composite core */
	spinlock_t lock;		/* protects req lists and rx state */
	atomic_t online;		/* set in set_alt(), cleared in disable() */
	/* usb descriptors */
	struct ccid_descs fs;
	struct ccid_descs hs;
	/* usb eps*/
	struct usb_ep *notify;
	struct usb_ep *in;
	struct usb_ep *out;
	struct usb_request *notify_req;	/* interrupt IN request for ioctl path */
	struct ccid_ctrl_dev ctrl_dev;
	struct ccid_bulk_dev bulk_dev;
	int dtr_state;			/* 1 while configured, read via ioctl */
};
79
/* Singleton function instance, created by ccid_setup(). */
static struct f_ccid *_ccid_dev;
/* Forward declarations of the misc devices registered below. */
static struct miscdevice ccid_bulk_device;
static struct miscdevice ccid_ctrl_device;
83
/* Interface Descriptor: */
static struct usb_interface_descriptor ccid_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,	/* notify + bulk IN + bulk OUT */
	.bInterfaceClass =	USB_CLASS_CSCID,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	0,
};
/* CCID Class Descriptor */
static struct usb_ccid_class_descriptor ccid_class_desc = {
	.bLength =		sizeof(ccid_class_desc),
	.bDescriptorType =	CCID_DECRIPTOR_TYPE,
	.bcdCCID =		CCID1_10,
	.bMaxSlotIndex =	0,	/* single slot */
	/* This value indicates what voltages the CCID can supply to slots */
	.bVoltageSupport =	VOLTS_3_0,
	.dwProtocols =		PROTOCOL_TO,
	/* Default ICC clock frequency in KHz */
	.dwDefaultClock =	3580,
	/* Maximum supported ICC clock frequency in KHz */
	.dwMaximumClock =	3580,
	.bNumClockSupported =	0,
	/* Default ICC I/O data rate in bps */
	.dwDataRate =		9600,
	/* Maximum supported ICC I/O data rate in bps */
	.dwMaxDataRate =	9600,
	.bNumDataRatesSupported = 0,
	.dwMaxIFSD =		0,
	.dwSynchProtocols =	0,
	.dwMechanical =		0,
	/* This value indicates what intelligent features the CCID has */
	.dwFeatures =		CCID_FEATURES_EXC_SAPDU |
				CCID_FEATURES_AUTO_PNEGO |
				CCID_FEATURES_AUTO_BAUD |
				CCID_FEATURES_AUTO_CLOCK |
				CCID_FEATURES_AUTO_VOLT |
				CCID_FEATURES_AUTO_ACTIV |
				CCID_FEATURES_AUTO_PCONF,
	/* extended APDU level Message Length */
	.dwMaxCCIDMessageLength = 0x200,
	.bClassGetResponse =	0x0,
	.bClassEnvelope =	0x0,
	.wLcdLayout =		0,
	.bPINSupport =		0,
	.bMaxCCIDBusySlots =	1
};
/* Full speed support: */
static struct usb_endpoint_descriptor ccid_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	/* FS interrupt bInterval is in frames (1ms units) */
	.bInterval =		1 << CCID_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor ccid_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor ccid_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

/* NULL-terminated descriptor list for full speed */
static struct usb_descriptor_header *ccid_fs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_fs_notify_desc,
	(struct usb_descriptor_header *) &ccid_fs_in_desc,
	(struct usb_descriptor_header *) &ccid_fs_out_desc,
	NULL,
};
165
/* High speed support: */
static struct usb_endpoint_descriptor ccid_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET),
	/* HS interrupt bInterval is an exponent of 125us microframes */
	.bInterval =		CCID_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor ccid_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor ccid_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

/* NULL-terminated descriptor list for high speed */
static struct usb_descriptor_header *ccid_hs_descs[] = {
	(struct usb_descriptor_header *) &ccid_interface_desc,
	(struct usb_descriptor_header *) &ccid_class_desc,
	(struct usb_descriptor_header *) &ccid_hs_notify_desc,
	(struct usb_descriptor_header *) &ccid_hs_in_desc,
	(struct usb_descriptor_header *) &ccid_hs_out_desc,
	NULL,
};
200
/* Map the embedded usb_function back to its enclosing f_ccid. */
static inline struct f_ccid *func_to_ccid(struct usb_function *f)
{
	return container_of(f, struct f_ccid, function);
}
205
206static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head,
207 struct usb_request *req)
208{
209 unsigned long flags;
210
211 spin_lock_irqsave(&ccid_dev->lock, flags);
212 list_add_tail(&req->list, head);
213 spin_unlock_irqrestore(&ccid_dev->lock, flags);
214}
215
216static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev,
217 struct list_head *head)
218{
219 unsigned long flags;
220 struct usb_request *req = NULL;
221
222 spin_lock_irqsave(&ccid_dev->lock, flags);
223 if (!list_empty(head)) {
224 req = list_first_entry(head, struct usb_request, list);
225 list_del(&req->list);
226 }
227 spin_unlock_irqrestore(&ccid_dev->lock, flags);
228 return req;
229}
230
231static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req)
232{
233 switch (req->status) {
234 case -ECONNRESET:
235 case -ESHUTDOWN:
236 case 0:
237 break;
238 default:
239 pr_err("CCID notify ep error %d\n", req->status);
240 }
241}
242
243static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req)
244{
245 struct f_ccid *ccid_dev = _ccid_dev;
246 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
247
248 if (req->status != 0)
249 atomic_set(&bulk_dev->error, 1);
250
251 ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
252 wake_up(&bulk_dev->write_wq);
253}
254
255static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req)
256{
257 struct f_ccid *ccid_dev = _ccid_dev;
258 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
259 if (req->status != 0)
260 atomic_set(&bulk_dev->error, 1);
261
262 bulk_dev->rx_done = 1;
263 wake_up(&bulk_dev->read_wq);
264}
265
266static struct usb_request *
267ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
268{
269 struct usb_request *req;
270
271 req = usb_ep_alloc_request(ep, kmalloc_flags);
272
273 if (req != NULL) {
274 req->length = len;
275 req->buf = kmalloc(len, kmalloc_flags);
276 if (req->buf == NULL) {
277 usb_ep_free_request(ep, req);
278 req = NULL;
279 }
280 }
281
282 return req ? req : ERR_PTR(-ENOMEM);
283}
284
285static void ccid_request_free(struct usb_request *req, struct usb_ep *ep)
286{
287 if (req) {
288 kfree(req->buf);
289 usb_ep_free_request(ep, req);
290 }
291}
292
/*
 * Handle CCID class-specific EP0 requests.
 *
 * ABORT (host->device, no data stage) is forwarded to the userspace
 * control reader via ctrl_dev->buf. The two GET_* requests answer
 * directly from the class descriptor on EP0. Returns the queued data
 * length (>= 0), 0 for ABORT, or a negative errno.
 */
static int
ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function);
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int ret = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);

	if (!atomic_read(&ccid_dev->online))
		return -ENOTCONN;

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_ABORT:
		if (w_length != 0)
			goto invalid;
		/* stage the abort (wValue carries slot/seq) for userspace */
		ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT;
		ctrl_dev->buf[1] = w_value & 0xFF;
		ctrl_dev->buf[2] = (w_value >> 8) & 0xFF;
		ctrl_dev->buf[3] = 0x00;
		ctrl_dev->tx_ctrl_done = 1;
		wake_up(&ctrl_dev->tx_wait_q);
		return 0;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_CLOCK_FREQUENCIES:
		if (w_length > req->length)
			goto invalid;
		*(u32 *) req->buf =
				cpu_to_le32(ccid_class_desc.dwDefaultClock);
		ret = min_t(u32, w_length,
				sizeof(ccid_class_desc.dwDefaultClock));
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| CCIDGENERICREQ_GET_DATA_RATES:
		if (w_length > req->length)
			goto invalid;
		*(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate);
		ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate));
		break;

	default:
invalid:
		pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			pr_err("ccid ep0 enqueue err %d\n", ret);
	}

	return ret;
}
360
/*
 * Called on disconnect/config change: disable all three endpoints, free
 * the per-connection requests (they are reallocated in set_alt), mark
 * the function offline and wake all sleepers so they can bail out.
 */
static void ccid_function_disable(struct usb_function *f)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	struct usb_request *req;

	/* Disable endpoints */
	usb_ep_disable(ccid_dev->notify);
	usb_ep_disable(ccid_dev->in);
	usb_ep_disable(ccid_dev->out);
	/* Free endpoint related requests */
	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
	/* if the reader holds rx_req, ccid_bulk_read() frees it instead */
	if (!atomic_read(&bulk_dev->rx_req_busy))
		ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
		ccid_request_free(req, ccid_dev->in);

	ccid_dev->dtr_state = 0;
	atomic_set(&ccid_dev->online, 0);
	/* Wake up threads so they observe online == 0 and return -ENODEV */
	wake_up(&bulk_dev->write_wq);
	wake_up(&bulk_dev->read_wq);
	wake_up(&ctrl_dev->tx_wait_q);

}
387
/*
 * Host selected our interface: allocate the notify/bulk requests, pick
 * the speed-appropriate descriptors and enable all three endpoints.
 * On any failure everything allocated/enabled so far is unwound in
 * reverse order. Runs in interrupt context, hence GFP_ATOMIC.
 */
static int
ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_ccid *ccid_dev = func_to_ccid(f);
	struct usb_composite_dev *cdev = ccid_dev->cdev;
	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
	struct usb_request *req;
	int ret = 0;
	int i;

	ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
			sizeof(struct usb_ccid_notification), GFP_ATOMIC);
	if (IS_ERR(ccid_dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		return PTR_ERR(ccid_dev->notify_req);
	}
	ccid_dev->notify_req->complete = ccid_notify_complete;
	ccid_dev->notify_req->context = ccid_dev;

	/* now allocate requests for our endpoints */
	req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE,
			GFP_ATOMIC);
	if (IS_ERR(req)) {
		pr_err("%s: unable to allocate memory for out req\n",
				__func__);
		ret = PTR_ERR(req);
		goto free_notify;
	}
	req->complete = ccid_bulk_complete_out;
	req->context = ccid_dev;
	bulk_dev->rx_req = req;

	/* pre-allocate a pool of TX_REQ_MAX bulk IN requests */
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE,
				GFP_ATOMIC);
		if (IS_ERR(req)) {
			pr_err("%s: unable to allocate memory for in req\n",
					__func__);
			ret = PTR_ERR(req);
			goto free_bulk_out;
		}
		req->complete = ccid_bulk_complete_in;
		req->context = ccid_dev;
		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
	}

	/* choose the descriptors and enable endpoints */
	ccid_dev->notify->desc = ep_choose(cdev->gadget,
			ccid_dev->hs.notify,
			ccid_dev->fs.notify);
	ret = usb_ep_enable(ccid_dev->notify);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->notify->name, ret);
		goto free_bulk_in;
	}
	ccid_dev->notify->driver_data = ccid_dev;

	ccid_dev->in->desc = ep_choose(cdev->gadget,
			ccid_dev->hs.in, ccid_dev->fs.in);
	ret = usb_ep_enable(ccid_dev->in);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->in->name, ret);
		goto disable_ep_notify;
	}

	ccid_dev->out->desc = ep_choose(cdev->gadget,
			ccid_dev->hs.out, ccid_dev->fs.out);
	ret = usb_ep_enable(ccid_dev->out);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, ccid_dev->out->name, ret);
		goto disable_ep_in;
	}
	ccid_dev->dtr_state = 1;
	atomic_set(&ccid_dev->online, 1);
	return ret;

	/* error unwind, reverse order of the steps above */
disable_ep_in:
	usb_ep_disable(ccid_dev->in);
disable_ep_notify:
	usb_ep_disable(ccid_dev->notify);
	ccid_dev->notify->driver_data = NULL;
free_bulk_in:
	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
		ccid_request_free(req, ccid_dev->in);
free_bulk_out:
	ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
free_notify:
	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
	return ret;
}
482
/*
 * Release the descriptor copies made in ccid_function_bind();
 * hs_descriptors exist only on dual-speed gadgets.
 */
static void ccid_function_unbind(struct usb_configuration *c,
					struct usb_function *f)
{
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

}
491
492static int ccid_function_bind(struct usb_configuration *c,
493 struct usb_function *f)
494{
495 struct f_ccid *ccid_dev = func_to_ccid(f);
496 struct usb_ep *ep;
497 struct usb_composite_dev *cdev = c->cdev;
498 int ret = -ENODEV;
499
500 ccid_dev->ifc_id = usb_interface_id(c, f);
501 if (ccid_dev->ifc_id < 0) {
502 pr_err("%s: unable to allocate ifc id, err:%d",
503 __func__, ccid_dev->ifc_id);
504 return ccid_dev->ifc_id;
505 }
506 ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id;
507
508 ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc);
509 if (!ep) {
510 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
511 return -ENODEV;
512 }
513 ccid_dev->notify = ep;
514 ep->driver_data = cdev;
515
516 ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc);
517 if (!ep) {
518 pr_err("%s: usb epin autoconfig failed\n", __func__);
519 ret = -ENODEV;
520 goto ep_auto_in_fail;
521 }
522 ccid_dev->in = ep;
523 ep->driver_data = cdev;
524
525 ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc);
526 if (!ep) {
527 pr_err("%s: usb epout autoconfig failed\n", __func__);
528 ret = -ENODEV;
529 goto ep_auto_out_fail;
530 }
531 ccid_dev->out = ep;
532 ep->driver_data = cdev;
533
534 f->descriptors = usb_copy_descriptors(ccid_fs_descs);
535 if (!f->descriptors)
536 goto ep_auto_out_fail;
537
David Brownac5d1542012-02-06 10:37:22 -0800538 ccid_dev->fs.in = usb_find_endpoint(ccid_fs_descs,
539 f->descriptors,
540 &ccid_fs_in_desc);
541 ccid_dev->fs.out = usb_find_endpoint(ccid_fs_descs,
542 f->descriptors,
543 &ccid_fs_out_desc);
544 ccid_dev->fs.notify = usb_find_endpoint(ccid_fs_descs,
545 f->descriptors,
546 &ccid_fs_notify_desc);
547
Chiranjeevi Velempatie130fd02011-11-29 05:06:13 +0530548 if (gadget_is_dualspeed(cdev->gadget)) {
549 ccid_hs_in_desc.bEndpointAddress =
550 ccid_fs_in_desc.bEndpointAddress;
551 ccid_hs_out_desc.bEndpointAddress =
552 ccid_fs_out_desc.bEndpointAddress;
553 ccid_hs_notify_desc.bEndpointAddress =
554 ccid_fs_notify_desc.bEndpointAddress;
555
556 /* copy descriptors, and track endpoint copies */
557 f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs);
558 if (!f->hs_descriptors)
559 goto ep_auto_out_fail;
David Brownac5d1542012-02-06 10:37:22 -0800560
561 ccid_dev->hs.in = usb_find_endpoint(ccid_hs_descs,
562 f->hs_descriptors, &ccid_hs_in_desc);
563 ccid_dev->hs.out = usb_find_endpoint(ccid_hs_descs,
564 f->hs_descriptors, &ccid_hs_out_desc);
565 ccid_dev->hs.notify = usb_find_endpoint(ccid_hs_descs,
566 f->hs_descriptors, &ccid_hs_notify_desc);
Chiranjeevi Velempatie130fd02011-11-29 05:06:13 +0530567 }
568
569 pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
570 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
571 ccid_dev->in->name, ccid_dev->out->name);
572
573 return 0;
574
575ep_auto_out_fail:
576 ccid_dev->out->driver_data = NULL;
577 ccid_dev->out = NULL;
578ep_auto_in_fail:
579 ccid_dev->in->driver_data = NULL;
580 ccid_dev->in = NULL;
581
582 return ret;
583}
584
585static int ccid_bulk_open(struct inode *ip, struct file *fp)
586{
587 struct f_ccid *ccid_dev = _ccid_dev;
588 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
589 unsigned long flags;
590
591 pr_debug("ccid_bulk_open\n");
592 if (!atomic_read(&ccid_dev->online)) {
593 pr_debug("%s: USB cable not connected\n", __func__);
594 return -ENODEV;
595 }
596
597 if (atomic_read(&bulk_dev->opened)) {
598 pr_debug("%s: bulk device is already opened\n", __func__);
599 return -EBUSY;
600 }
601 atomic_set(&bulk_dev->opened, 1);
602 /* clear the error latch */
603 atomic_set(&bulk_dev->error, 0);
604 spin_lock_irqsave(&ccid_dev->lock, flags);
605 fp->private_data = ccid_dev;
606 spin_unlock_irqrestore(&ccid_dev->lock, flags);
607
608 return 0;
609}
610
611static int ccid_bulk_release(struct inode *ip, struct file *fp)
612{
613 struct f_ccid *ccid_dev = fp->private_data;
614 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
615
616 pr_debug("ccid_bulk_release\n");
617 atomic_set(&bulk_dev->opened, 0);
618 return 0;
619}
620
621static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
622 size_t count, loff_t *pos)
623{
624 struct f_ccid *ccid_dev = fp->private_data;
625 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
626 struct usb_request *req;
627 int r = count, xfer;
628 int ret;
629 unsigned long flags;
630
631 pr_debug("ccid_bulk_read(%d)\n", count);
632
633 if (count > BULK_OUT_BUFFER_SIZE) {
634 pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n",
635 __func__, BULK_OUT_BUFFER_SIZE, count);
636 return -ENOMEM;
637 }
638
639 if (atomic_read(&bulk_dev->error)) {
640 r = -EIO;
641 pr_err("%s bulk_dev_error\n", __func__);
642 goto done;
643 }
644
645requeue_req:
646 spin_lock_irqsave(&ccid_dev->lock, flags);
647 if (!atomic_read(&ccid_dev->online)) {
648 pr_debug("%s: USB cable not connected\n", __func__);
649 return -ENODEV;
650 }
651 /* queue a request */
652 req = bulk_dev->rx_req;
653 req->length = count;
654 bulk_dev->rx_done = 0;
655 spin_unlock_irqrestore(&ccid_dev->lock, flags);
656 ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
657 if (ret < 0) {
658 r = -EIO;
659 pr_err("%s usb ep queue failed\n", __func__);
660 atomic_set(&bulk_dev->error, 1);
661 goto done;
662 }
663 /* wait for a request to complete */
664 ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done ||
665 atomic_read(&bulk_dev->error) ||
666 !atomic_read(&ccid_dev->online));
667 if (ret < 0) {
668 atomic_set(&bulk_dev->error, 1);
669 r = ret;
670 usb_ep_dequeue(ccid_dev->out, req);
671 goto done;
672 }
673 if (!atomic_read(&bulk_dev->error)) {
674 spin_lock_irqsave(&ccid_dev->lock, flags);
675 if (!atomic_read(&ccid_dev->online)) {
676 spin_unlock_irqrestore(&ccid_dev->lock, flags);
677 pr_debug("%s: USB cable not connected\n", __func__);
678 r = -ENODEV;
679 goto done;
680 }
681 /* If we got a 0-len packet, throw it back and try again. */
682 if (req->actual == 0) {
683 spin_unlock_irqrestore(&ccid_dev->lock, flags);
684 goto requeue_req;
685 }
686 xfer = (req->actual < count) ? req->actual : count;
687 atomic_set(&bulk_dev->rx_req_busy, 1);
688 spin_unlock_irqrestore(&ccid_dev->lock, flags);
689
690 if (copy_to_user(buf, req->buf, xfer))
691 r = -EFAULT;
692
693 spin_lock_irqsave(&ccid_dev->lock, flags);
694 atomic_set(&bulk_dev->rx_req_busy, 0);
695 if (!atomic_read(&ccid_dev->online)) {
696 ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
697 spin_unlock_irqrestore(&ccid_dev->lock, flags);
698 pr_debug("%s: USB cable not connected\n", __func__);
699 r = -ENODEV;
700 goto done;
701 }
702 spin_unlock_irqrestore(&ccid_dev->lock, flags);
703 } else {
704 r = -EIO;
705 }
706done:
707 pr_debug("ccid_bulk_read returning %d\n", r);
708 return r;
709}
710
711static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf,
712 size_t count, loff_t *pos)
713{
714 struct f_ccid *ccid_dev = fp->private_data;
715 struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
716 struct usb_request *req = 0;
717 int r = count;
718 int ret;
719 unsigned long flags;
720
721 pr_debug("ccid_bulk_write(%d)\n", count);
722
723 if (!atomic_read(&ccid_dev->online)) {
724 pr_debug("%s: USB cable not connected\n", __func__);
725 return -ENODEV;
726 }
727
728 if (!count) {
729 pr_err("%s: zero length ctrl pkt\n", __func__);
730 return -ENODEV;
731 }
732 if (count > BULK_IN_BUFFER_SIZE) {
733 pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n",
734 __func__, BULK_IN_BUFFER_SIZE, count);
735 return -ENOMEM;
736 }
737
738
739 /* get an idle tx request to use */
740 ret = wait_event_interruptible(bulk_dev->write_wq,
741 ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) ||
742 atomic_read(&bulk_dev->error)));
743
744 if (ret < 0) {
745 r = ret;
746 goto done;
747 }
748
749 if (atomic_read(&bulk_dev->error)) {
750 pr_err(" %s dev->error\n", __func__);
751 r = -EIO;
752 goto done;
753 }
754 if (copy_from_user(req->buf, buf, count)) {
755 if (!atomic_read(&ccid_dev->online)) {
756 pr_debug("%s: USB cable not connected\n",
757 __func__);
758 ccid_request_free(req, ccid_dev->in);
759 r = -ENODEV;
760 } else {
761 ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
762 r = -EFAULT;
763 }
764 goto done;
765 }
766 req->length = count;
767 ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL);
768 if (ret < 0) {
769 pr_debug("ccid_bulk_write: xfer error %d\n", ret);
770 atomic_set(&bulk_dev->error, 1);
771 ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
772 r = -EIO;
773 spin_lock_irqsave(&ccid_dev->lock, flags);
774 if (!atomic_read(&ccid_dev->online)) {
775 spin_unlock_irqrestore(&ccid_dev->lock, flags);
776 pr_debug("%s: USB cable not connected\n",
777 __func__);
778 while ((req = ccid_req_get(ccid_dev,
779 &bulk_dev->tx_idle)))
780 ccid_request_free(req, ccid_dev->in);
781 r = -ENODEV;
782 }
783 spin_unlock_irqrestore(&ccid_dev->lock, flags);
784 goto done;
785 }
786done:
787 pr_debug("ccid_bulk_write returning %d\n", r);
788 return r;
789}
790
/* File operations for the bulk data character device. */
static const struct file_operations ccid_bulk_fops = {
	.owner = THIS_MODULE,
	.read = ccid_bulk_read,
	.write = ccid_bulk_write,
	.open = ccid_bulk_open,
	.release = ccid_bulk_release,
};

/* Userspace-facing node: /dev/ccid_bulk */
static struct miscdevice ccid_bulk_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ccid_bulk",
	.fops = &ccid_bulk_fops,
};
804
805static int ccid_bulk_device_init(struct f_ccid *dev)
806{
807 int ret;
808 struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev;
809
810 init_waitqueue_head(&bulk_dev->read_wq);
811 init_waitqueue_head(&bulk_dev->write_wq);
812 INIT_LIST_HEAD(&bulk_dev->tx_idle);
813
814 ret = misc_register(&ccid_bulk_device);
815 if (ret) {
816 pr_err("%s: failed to register misc device\n", __func__);
817 return ret;
818 }
819
820 return 0;
821}
822
823static int ccid_ctrl_open(struct inode *inode, struct file *fp)
824{
825 struct f_ccid *ccid_dev = _ccid_dev;
826 struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
827 unsigned long flags;
828
829 if (!atomic_read(&ccid_dev->online)) {
830 pr_debug("%s: USB cable not connected\n", __func__);
831 return -ENODEV;
832 }
833 if (atomic_read(&ctrl_dev->opened)) {
834 pr_debug("%s: ctrl device is already opened\n", __func__);
835 return -EBUSY;
836 }
837 atomic_set(&ctrl_dev->opened, 1);
838 spin_lock_irqsave(&ccid_dev->lock, flags);
839 fp->private_data = ccid_dev;
840 spin_unlock_irqrestore(&ccid_dev->lock, flags);
841
842 return 0;
843}
844
845
846static int ccid_ctrl_release(struct inode *inode, struct file *fp)
847{
848 struct f_ccid *ccid_dev = fp->private_data;
849 struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
850
851 atomic_set(&ctrl_dev->opened, 0);
852
853 return 0;
854}
855
/*
 * read() for /dev/ccid_ctrl: block until ccid_function_setup() stages a
 * class request (tx_ctrl_done), then copy up to CTRL_BUF_SIZE bytes of
 * the staged message to userspace. Returns the byte count, or a
 * negative errno on signal/disconnect/copy failure.
 */
static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct f_ccid *ccid_dev = fp->private_data;
	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
	int ret = 0;

	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	/* clamp the read to the staging buffer size */
	if (count > CTRL_BUF_SIZE)
		count = CTRL_BUF_SIZE;

	ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
			ctrl_dev->tx_ctrl_done);
	if (ret < 0)
		return ret;
	/* consume the staged message */
	ctrl_dev->tx_ctrl_done = 0;

	/* the wait may also have been woken by disconnect */
	if (!atomic_read(&ccid_dev->online)) {
		pr_debug("%s: USB cable not connected\n", __func__);
		return -ENODEV;
	}
	ret = copy_to_user(buf, ctrl_dev->buf, count);
	if (ret)
		return -EFAULT;

	return count;
}
886
887static long
888ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg)
889{
890 struct f_ccid *ccid_dev = fp->private_data;
891 struct usb_request *req = ccid_dev->notify_req;
892 struct usb_ccid_notification *ccid_notify = req->buf;
893 void __user *argp = (void __user *)arg;
894 int ret = 0;
895
896 switch (cmd) {
897 case CCID_NOTIFY_CARD:
898 if (copy_from_user(ccid_notify, argp,
899 sizeof(struct usb_ccid_notification)))
900 return -EFAULT;
901 req->length = 2;
902 break;
903 case CCID_NOTIFY_HWERROR:
904 if (copy_from_user(ccid_notify, argp,
905 sizeof(struct usb_ccid_notification)))
906 return -EFAULT;
907 req->length = 4;
908 break;
909 case CCID_READ_DTR:
910 if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int)))
911 return -EFAULT;
912 return 0;
913 }
914 ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL);
915 if (ret < 0) {
916 pr_err("ccid notify ep enqueue error %d\n", ret);
917 return ret;
918 }
919 return 0;
920}
921
/* File operations for the control character device. */
static const struct file_operations ccid_ctrl_fops = {
	.owner = THIS_MODULE,
	.open = ccid_ctrl_open,
	.release = ccid_ctrl_release,
	.read = ccid_ctrl_read,
	.unlocked_ioctl = ccid_ctrl_ioctl,
};

/* Userspace-facing node: /dev/ccid_ctrl */
static struct miscdevice ccid_ctrl_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ccid_ctrl",
	.fops = &ccid_ctrl_fops,
};
935
936static int ccid_ctrl_device_init(struct f_ccid *dev)
937{
938 int ret;
939 struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
940
941 INIT_LIST_HEAD(&ctrl_dev->tx_q);
942 init_waitqueue_head(&ctrl_dev->tx_wait_q);
943
944 ret = misc_register(&ccid_ctrl_device);
945 if (ret) {
946 pr_err("%s: failed to register misc device\n", __func__);
947 return ret;
948 }
949
950 return 0;
951}
952
953static int ccid_bind_config(struct usb_configuration *c)
954{
955 struct f_ccid *ccid_dev = _ccid_dev;
956
957 pr_debug("ccid_bind_config\n");
958 ccid_dev->cdev = c->cdev;
959 ccid_dev->function.name = FUNCTION_NAME;
960 ccid_dev->function.descriptors = ccid_fs_descs;
961 ccid_dev->function.hs_descriptors = ccid_hs_descs;
962 ccid_dev->function.bind = ccid_function_bind;
963 ccid_dev->function.unbind = ccid_function_unbind;
964 ccid_dev->function.set_alt = ccid_function_set_alt;
965 ccid_dev->function.setup = ccid_function_setup;
966 ccid_dev->function.disable = ccid_function_disable;
967
968 return usb_add_function(c, &ccid_dev->function);
969
970}
971
972static int ccid_setup(void)
973{
974 struct f_ccid *ccid_dev;
975 int ret;
976
977 ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL);
978 if (!ccid_dev)
979 return -ENOMEM;
980
981 _ccid_dev = ccid_dev;
982 spin_lock_init(&ccid_dev->lock);
983
984 ret = ccid_ctrl_device_init(ccid_dev);
985 if (ret) {
986 pr_err("%s: ccid_ctrl_device_init failed, err:%d\n",
987 __func__, ret);
988 goto err_ctrl_init;
989 }
990 ret = ccid_bulk_device_init(ccid_dev);
991 if (ret) {
992 pr_err("%s: ccid_bulk_device_init failed, err:%d\n",
993 __func__, ret);
994 goto err_bulk_init;
995 }
996
997 return 0;
998err_bulk_init:
999 misc_deregister(&ccid_ctrl_device);
1000err_ctrl_init:
1001 kfree(ccid_dev);
1002 pr_err("ccid gadget driver failed to initialize\n");
1003 return ret;
1004}
1005
1006static void ccid_cleanup(void)
1007{
1008 misc_deregister(&ccid_bulk_device);
1009 misc_deregister(&ccid_ctrl_device);
1010 kfree(_ccid_dev);
1011}