/*
 * R8A66597 UDC (USB gadget)
 *
 * Copyright (C) 2006-2009 Renesas Solutions Corp.
 *
 * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "r8a66597-udc.h"

#define DRIVER_VERSION	"2009-08-18"

static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
	"ep8", "ep9",
};

static void init_controller(struct r8a66597 *r8a66597);
static void disable_controller(struct r8a66597 *r8a66597);
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req);
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags);

static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status);

/*-------------------------------------------------------------------------*/
static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
{
	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
}

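/*
 * enable_pipe_irq()/disable_pipe_irq() briefly mask the BRDY/NRDY/BEMP
 * interrupt classes in INTENB0 while the per-pipe enable register is
 * updated, then restore the saved mask (presumably to avoid spurious
 * pipe interrupts while the enable bits are being changed).
 */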
static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);

	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}

static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);

	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);

	disable_controller(r8a66597);
	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}

static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 pid = 0;
	unsigned long offset;

	if (pipenum == 0)
		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		pid = r8a66597_read(r8a66597, offset) & PID;
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return pid;
}

static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
		u16 pid)
{
	unsigned long offset;

	if (pipenum == 0)
		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_mdfy(r8a66597, pid, PID, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}

static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}

static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}

static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}

static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 ret = 0;
	unsigned long offset;

	if (pipenum == 0)
		ret = r8a66597_read(r8a66597, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		ret = r8a66597_read(r8a66597, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return ret;
}

static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0)
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}

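/*
 * Return the usable FIFO buffer size for a pipe: in continuous transfer
 * mode (CNTMD) the size comes from the DCPCFG/PIPEBUF buffer setting,
 * otherwise it is simply the pipe's maximum packet size.
 */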
static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;

	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}

	return size;
}

static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip)
		return MBW_32;
	else
		return MBW_16;
}

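/*
 * Select a pipe on the CFIFO port.  The short ndelay() after writing
 * CURPIPE appears to give the pipe selection time to settle before the
 * FIFO access width (MBW) is set and the FIFO is touched; the on-chip
 * variant uses 32-bit FIFO accesses, the external chip a 16-bit bus.
 */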
static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];

	if (ep->use_dma)
		return;

	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);

	ndelay(450);

	r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
}

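/*
 * Program PIPESEL/PIPECFG/PIPEBUF for a newly allocated pipe.  Bulk and
 * isochronous pipes are carved out of the shared FIFO memory in 16-block
 * strides starting at R8A66597_BASE_BUFNUM (bulk pipes double-buffered),
 * while interrupt pipes use fixed single 64-byte blocks; -ENOMEM is
 * returned if the FIFO memory would be exceeded.
 */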
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;

	if (info->pipe == 0)
		return -EINVAL;

	r8a66597_write(r8a66597, info->pipe, PIPESEL);

	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;

		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;
		break;
	case R8A66597_ISO:
		bufnum = R8A66597_BASE_BUFNUM +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}

	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
		pr_err("r8a66597 pipe memory is insufficient\n");
		return -ENOMEM;
	}

	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);

	return 0;
}

static void pipe_buffer_release(struct r8a66597 *r8a66597,
				struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;

	if (is_bulk_pipe(info->pipe))
		r8a66597->bulk--;
	else if (is_interrupt_pipe(info->pipe))
		r8a66597->interrupt--;
	else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else
		printk(KERN_ERR "ep_release: unexpected pipenum (%d)\n",
				info->pipe);
}

static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);

	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);

		ndelay(450);

		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}

static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
	ep->fifotrn = 0;

	ep->pipectr = get_pipectr_addr(pipenum);
	ep->pipenum = pipenum;
	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}

static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (pipenum == 0)
		return;

	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}

static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;
	int ret;

	ep->desc = desc;

	if (ep->pipenum)	/* already allocated pipe */
		return 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				printk(KERN_ERR "bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			printk(KERN_ERR "interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			printk(KERN_ERR "isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		printk(KERN_ERR "unexpected xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;

	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;

	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		printk(KERN_ERR "pipe_buffer_setting failed\n");
		return ret;
	}

	(*counter)++;
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;

	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);

	return 0;
}

static int free_pipe_config(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;

	info.pipe = ep->pipenum;
	info.type = ep->type;
	pipe_buffer_release(r8a66597, &info);
	r8a66597_ep_release(ep);

	return 0;
}

/*-------------------------------------------------------------------------*/
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}

static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}

/* if ccpl is set, the gadget driver's complete function is not called */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}

static void start_ep0_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}

static void start_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;

	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);

	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0))
		pipe_irq_enable(r8a66597, ep->pipenum);
	else
		irq_packet_write(ep, req);
}

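/*
 * Start an OUT transfer.  For DMA-capable pipes the transaction counter
 * (TRN) is programmed with the number of packets expected for the request,
 * presumably so the pipe stops accepting data once the request is
 * satisfied.
 */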
static void start_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		if (ep->use_dma) {
			r8a66597_bset(r8a66597, TRCLR, ep->fifosel);
			pipe_change(r8a66597, pipenum);
			r8a66597_bset(r8a66597, TRENB, ep->fifosel);
			r8a66597_write(r8a66597,
				(req->req.length + ep->ep.maxpacket - 1)
					/ ep->ep.maxpacket,
				ep->fifotrn);
		}
		pipe_start(r8a66597, pipenum);	/* trigger once */
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	if (ep->desc->bEndpointAddress & USB_DIR_IN)
		start_packet_write(ep, req);
	else
		start_packet_read(ep, req);
}

static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;

	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;

	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;

	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "start_ep0: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

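/*
 * Bring the controller up.  The on-chip variant only needs the bus-wait
 * value, HSE and clock enables; the external chip additionally goes
 * through a staged clock start-up (XCKE, then PLLC, then SCKE) with short
 * sleeps in between.
 */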
static void init_controller(struct r8a66597 *r8a66597)
{
	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;

	if (r8a66597->pdata->on_chip) {
		if (r8a66597->pdata->buswait)
			r8a66597_write(r8a66597, r8a66597->pdata->buswait,
					SYSCFG1);
		else
			r8a66597_write(r8a66597, 0x0f, SYSCFG1);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	} else {
		r8a66597_bset(r8a66597, vif | endian, PINCFG);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
				XTAL, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, XCKE, SYSCFG0);

		msleep(3);

		r8a66597_bset(r8a66597, PLLC, SYSCFG0);

		msleep(1);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	}
}

static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		/* disable interrupts */
		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);
		r8a66597_write(r8a66597, 0, BRDYENB);
		r8a66597_write(r8a66597, 0, BEMPENB);
		r8a66597_write(r8a66597, 0, NRDYENB);

		/* clear status */
		r8a66597_write(r8a66597, 0, BRDYSTS);
		r8a66597_write(r8a66597, 0, NRDYSTS);
		r8a66597_write(r8a66597, 0, BEMPSTS);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);

	} else {
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}

static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
{
	u16 tmp;

	if (!r8a66597->pdata->on_chip) {
		tmp = r8a66597_read(r8a66597, SYSCFG0);
		if (!(tmp & XCKE))
			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
	}
}

static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}

/*-------------------------------------------------------------------------*/
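/*
 * Complete the request at the head of the endpoint queue.  Called with the
 * controller lock held; the lock is dropped around the gadget's
 * ->complete() callback, as the gadget API expects, and the next queued
 * request (if any) is started afterwards.
 */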
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;

	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}

	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	if (!list_empty(&ep->queue))
		restart = 1;

	spin_unlock(&ep->r8a66597->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);

	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->desc)
			start_packet(ep, req);
	}
}

static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);

	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			printk(KERN_ERR "pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.\n");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}

static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "write fifo not ready. pipenum=%d\n", pipenum);
		return;
	}

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "read fifo not ready\n");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);

	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}

	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);

	}

	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}

static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}

static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}

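/*
 * GET_STATUS is answered from inside the driver: the two status bytes are
 * placed in the pre-allocated ep0_req and queued on ep0.  The original
 * "AV:" note below still applies -- a second GET_STATUS arriving before
 * the previous reply has gone out would reuse the same request and buffer.
 */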
static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}

	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
	spin_lock(&r8a66597->lock);
}

static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			control_reg_sqclr(r8a66597, ep->pipenum);
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		}

		control_end(r8a66597, 1);

		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
	}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(r8a66597, ep->pipenum);

		control_end(r8a66597, 1);
	}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

/* if the return value is non-zero, call the class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 *p = (u16 *)ctrl;
	unsigned long offset = USBREQ;
	int i, ret = 0;

	/* read fifo */
	r8a66597_write(r8a66597, ~VALID, INTSTS0);

	for (i = 0; i < 4; i++)
		p[i] = r8a66597_read(r8a66597, offset + i*2);

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(r8a66597, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(r8a66597, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(r8a66597, ctrl);
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;
	return ret;
}

static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
{
	u16 speed = get_usb_speed(r8a66597);

	switch (speed) {
	case HSMODE:
		r8a66597->gadget.speed = USB_SPEED_HIGH;
		break;
	case FSMODE:
		r8a66597->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
		printk(KERN_ERR "USB speed unknown\n");
	}
}

static void irq_device_state(struct r8a66597 *r8a66597)
{
	u16 dvsq;

	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);

	if (dvsq == DS_DFLT) {
		/* bus reset */
		spin_unlock(&r8a66597->lock);
		r8a66597->driver->disconnect(&r8a66597->gadget);
		spin_lock(&r8a66597->lock);
		r8a66597_update_usb_speed(r8a66597);
	}
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);

	r8a66597->old_dvsq = dvsq;
}

static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;

	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);

	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
	}
		break;

	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
					< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		}
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "ctrl_stage: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

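/*
 * Top-level interrupt handler.  CFIFOSEL is saved on entry and restored
 * before returning because the FIFO service routines change CURPIPE while
 * the interrupt is being handled.
 */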
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 brdysts, nrdysts, bempsts;
	u16 brdyenb, nrdyenb, bempenb;
	u16 savepipe;
	u16 mask0;

	spin_lock(&r8a66597->lock);

	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);

	savepipe = r8a66597_read(r8a66597, CFIFOSEL);

	mask0 = intsts0 & intenb0;
	if (mask0) {
		brdysts = r8a66597_read(r8a66597, BRDYSTS);
		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
		bempsts = r8a66597_read(r8a66597, BEMPSTS);
		brdyenb = r8a66597_read(r8a66597, BRDYENB);
		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
		bempenb = r8a66597_read(r8a66597, BEMPENB);

		if (mask0 & VBINT) {
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);

			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;

			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);

		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);

		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	}

	r8a66597_write(r8a66597, savepipe, CFIFOSEL);

	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
}

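/*
 * VBUS debounce: the timer re-reads VBSTS every 50 ms and only reports a
 * connect or disconnect once R8A66597_MAX_SAMPLING consecutive samples
 * agree; any change in the reading restarts the sampling window.
 */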
static void r8a66597_timer(unsigned long _r8a66597)
{
	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
	unsigned long flags;
	u16 tmp;

	spin_lock_irqsave(&r8a66597->lock, flags);
	tmp = r8a66597_read(r8a66597, SYSCFG0);
	if (r8a66597->scount > 0) {
		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
		if (tmp == r8a66597->old_vbus) {
			r8a66597->scount--;
			if (r8a66597->scount == 0) {
				if (tmp == VBSTS)
					r8a66597_usb_connect(r8a66597);
				else
					r8a66597_usb_disconnect(r8a66597);
			} else {
				mod_timer(&r8a66597->timer,
						jiffies + msecs_to_jiffies(50));
			}
		} else {
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			r8a66597->old_vbus = tmp;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
	}
	spin_unlock_irqrestore(&r8a66597->lock, flags);
}

/*-------------------------------------------------------------------------*/
static int r8a66597_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597_ep *ep;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	return alloc_pipe_config(ep, desc);
}

static int r8a66597_disable(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	BUG_ON(!ep);

	while (!list_empty(&ep->queue)) {
		req = get_request_from_ep(ep);
		spin_lock_irqsave(&ep->r8a66597->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	}

	pipe_irq_disable(ep->r8a66597, ep->pipenum);
	return free_pipe_config(ep);
}

static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct r8a66597_request *req;

	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_request *req;

	req = container_of(_req, struct r8a66597_request, req);
	kfree(req);
}

static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);

	if (list_empty(&ep->queue))
		request = 1;

	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (ep->desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		if (request && !ep->busy)
			start_packet(ep, req);
	}

	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = get_request_from_ep(ep);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}
	if (value) {
		ep->busy = 1;
		pipe_stall(ep->r8a66597, ep->pipenum);
	} else {
		ep->busy = 0;
		ep->wedge = 0;
		pipe_stop(ep->r8a66597, ep->pipenum);
	}

out:
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return ret;
}

static int r8a66597_set_wedge(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	ep->wedge = 1;
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return usb_ep_set_halt(_ep);
}

static void r8a66597_fifo_flush(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}

static struct usb_ep_ops r8a66597_ep_ops = {
	.enable		= r8a66597_enable,
	.disable	= r8a66597_disable,

	.alloc_request	= r8a66597_alloc_request,
	.free_request	= r8a66597_free_request,

	.queue		= r8a66597_queue,
	.dequeue	= r8a66597_dequeue,

	.set_halt	= r8a66597_set_halt,
	.set_wedge	= r8a66597_set_wedge,
	.fifo_flush	= r8a66597_fifo_flush,
};

/*-------------------------------------------------------------------------*/
static struct r8a66597 *the_controller;

static int r8a66597_start(struct usb_gadget_driver *driver,
			int (*bind)(struct usb_gadget *))
{
	struct r8a66597 *r8a66597 = the_controller;
	int retval;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !bind
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;
	if (r8a66597->driver)
		return -EBUSY;

	/* hook up the driver */
	driver->driver.bus = NULL;
	r8a66597->driver = driver;
	r8a66597->gadget.dev.driver = &driver->driver;

	retval = device_add(&r8a66597->gadget.dev);
	if (retval) {
		printk(KERN_ERR "device_add error (%d)\n", retval);
		goto error;
	}

	retval = bind(&r8a66597->gadget);
	if (retval) {
		printk(KERN_ERR "bind to driver error (%d)\n", retval);
		device_del(&r8a66597->gadget.dev);
		goto error;
	}

	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
					INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	}

	return 0;

error:
	r8a66597->driver = NULL;
	r8a66597->gadget.dev.driver = NULL;

	return retval;
}

static int r8a66597_stop(struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = the_controller;
	unsigned long flags;

	if (driver != r8a66597->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (r8a66597->gadget.speed != USB_SPEED_UNKNOWN)
		r8a66597_usb_disconnect(r8a66597);
	r8a66597_bclr(r8a66597, VBSE, INTENB0);
	disable_controller(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	driver->unbind(&r8a66597->gadget);

	device_del(&r8a66597->gadget.dev);
	r8a66597->driver = NULL;
	return 0;
}

/*-------------------------------------------------------------------------*/
static int r8a66597_get_frame(struct usb_gadget *_gadget)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}

static struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame		= r8a66597_get_frame,
	.start			= r8a66597_start,
	.stop			= r8a66597_stop,
};

static int __exit r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);

	usb_del_gadget_udc(&r8a66597->gadget);
	del_timer_sync(&r8a66597->timer);
	iounmap(r8a66597->reg);
	free_irq(platform_get_irq(pdev, 0), r8a66597);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
	kfree(r8a66597);
	return 0;
}

static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}

static int __init r8a66597_probe(struct platform_device *pdev)
{
#ifdef CONFIG_HAVE_CLK
	char clk_name[8];
#endif
	struct resource *res, *ires;
	int irq;
	void __iomem *reg = NULL;
	struct r8a66597 *r8a66597 = NULL;
	int ret = 0;
	int i;
	unsigned long irq_trigger;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		printk(KERN_ERR "platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	irq = ires->start;
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;

	if (irq < 0) {
		ret = -ENODEV;
		printk(KERN_ERR "platform_get_irq error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		printk(KERN_ERR "ioremap error.\n");
		goto clean_up;
	}

	/* initialize udc */
	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
	if (r8a66597 == NULL) {
		ret = -ENOMEM;
		printk(KERN_ERR "kzalloc error\n");
		goto clean_up;
	}

	spin_lock_init(&r8a66597->lock);
	dev_set_drvdata(&pdev->dev, r8a66597);
	r8a66597->pdata = pdev->dev.platform_data;
	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;

	r8a66597->gadget.ops = &r8a66597_gadget_ops;
	device_initialize(&r8a66597->gadget.dev);
	dev_set_name(&r8a66597->gadget.dev, "gadget");
	r8a66597->gadget.is_dualspeed = 1;
	r8a66597->gadget.dev.parent = &pdev->dev;
	r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
	r8a66597->gadget.dev.release = pdev->dev.release;
	r8a66597->gadget.name = udc_name;

	init_timer(&r8a66597->timer);
	r8a66597->timer.function = r8a66597_timer;
	r8a66597->timer.data = (unsigned long)r8a66597;
	r8a66597->reg = reg;

#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
		r8a66597->clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(r8a66597->clk)) {
			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
				clk_name);
			ret = PTR_ERR(r8a66597->clk);
			goto clean_up;
		}
		clk_enable(r8a66597->clk);
	}
#endif

	disable_controller(r8a66597); /* make sure controller is disabled */

	ret = request_irq(irq, r8a66597_irq, IRQF_DISABLED | IRQF_SHARED,
			udc_name, r8a66597);
	if (ret < 0) {
		printk(KERN_ERR "request_irq error (%d)\n", ret);
		goto clean_up2;
	}

	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
		struct r8a66597_ep *ep = &r8a66597->ep[i];

		if (i != 0) {
			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
			list_add_tail(&r8a66597->ep[i].ep.ep_list,
					&r8a66597->gadget.ep_list);
		}
		ep->r8a66597 = r8a66597;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = r8a66597_ep_name[i];
		ep->ep.ops = &r8a66597_ep_ops;
		ep->ep.maxpacket = 512;
	}
	r8a66597->ep[0].ep.maxpacket = 64;
	r8a66597->ep[0].pipenum = 0;
	r8a66597->ep[0].fifoaddr = CFIFO;
	r8a66597->ep[0].fifosel = CFIFOSEL;
	r8a66597->ep[0].fifoctr = CFIFOCTR;
	r8a66597->ep[0].fifotrn = 0;
	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];

	the_controller = r8a66597;

	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
							GFP_KERNEL);
	if (r8a66597->ep0_req == NULL)
		goto clean_up3;
	r8a66597->ep0_req->complete = nop_completion;

	ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
	if (ret)
		goto err_add_udc;

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
	return 0;

err_add_udc:
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
clean_up3:
	free_irq(irq, r8a66597);
clean_up2:
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
clean_up:
	if (r8a66597) {
		if (r8a66597->ep0_req)
			r8a66597_free_request(&r8a66597->ep[0].ep,
						r8a66597->ep0_req);
		kfree(r8a66597);
	}
	if (reg)
		iounmap(reg);

	return ret;
}

/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
	.remove =	__exit_p(r8a66597_remove),
	.driver	= {
		.name =	(char *) udc_name,
	},
};
MODULE_ALIAS("platform:r8a66597_udc");

static int __init r8a66597_udc_init(void)
{
	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
}
module_init(r8a66597_udc_init);

static void __exit r8a66597_udc_cleanup(void)
{
	platform_driver_unregister(&r8a66597_driver);
}
module_exit(r8a66597_udc_cleanup);

MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");