/*
 * R8A66597 UDC (USB gadget)
 *
 * Copyright (C) 2006-2009 Renesas Solutions Corp.
 *
 * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "r8a66597-udc.h"

#define DRIVER_VERSION	"2009-08-18"

static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
	"ep8", "ep9",
};

static void disable_controller(struct r8a66597 *r8a66597);
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req);
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags);

static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status);

/*-------------------------------------------------------------------------*/
static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
{
	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
}

static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);

	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}

static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);

	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);

	disable_controller(r8a66597);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}

static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 pid = 0;
	unsigned long offset;

	if (pipenum == 0)
		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		pid = r8a66597_read(r8a66597, offset) & PID;
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return pid;
}

static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
		u16 pid)
{
	unsigned long offset;

	if (pipenum == 0)
		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_mdfy(r8a66597, pid, PID, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}

static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}

static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}

static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}

static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 ret = 0;
	unsigned long offset;

	if (pipenum == 0)
		ret = r8a66597_read(r8a66597, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		ret = r8a66597_read(r8a66597, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);

	return ret;
}

static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0)
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else
		printk(KERN_ERR "unexpected pipe num (%d)\n", pipenum);
}

static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;

	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}

	return size;
}

static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip)
		return MBW_32;
	else
		return MBW_16;
}

static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];

	if (ep->use_dma)
		return;

	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);

	ndelay(450);

	r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
}

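/*
 * Select the pipe via PIPESEL and program its configuration registers
 * (PIPECFG, PIPEBUF, PIPEMAXP, PIPEPERI).  FIFO buffer blocks are handed
 * out from r8a66597->bi_bufnum: interrupt pipes use fixed single buffers,
 * while bulk and isochronous pipes each take 16 blocks of 64 bytes
 * (bulk pipes are additionally double-buffered).
 */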
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;

	if (info->pipe == 0)
		return -EINVAL;

	r8a66597_write(r8a66597, info->pipe, PIPESEL);

	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		bufnum = r8a66597->bi_bufnum +
			 (info->pipe - R8A66597_BASE_PIPENUM_BULK) * 16;
		r8a66597->bi_bufnum += 16;
		buf_bsize = 7;
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;
		break;
	case R8A66597_ISO:
		bufnum = r8a66597->bi_bufnum +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		r8a66597->bi_bufnum += 16;
		buf_bsize = 7;
		break;
	}
	if (r8a66597->bi_bufnum > R8A66597_MAX_BUFNUM) {
		printk(KERN_ERR "r8a66597 pipe memory is insufficient(%d)\n",
			r8a66597->bi_bufnum);
		return -ENOMEM;
	}

	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);

	return 0;
}

static void pipe_buffer_release(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;

	switch (info->type) {
	case R8A66597_BULK:
		if (is_bulk_pipe(info->pipe))
			r8a66597->bi_bufnum -= 16;
		break;
	case R8A66597_ISO:
		if (is_isoc_pipe(info->pipe))
			r8a66597->bi_bufnum -= 16;
		break;
	}

	if (is_bulk_pipe(info->pipe))
		r8a66597->bulk--;
	else if (is_interrupt_pipe(info->pipe))
		r8a66597->interrupt--;
	else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else
		printk(KERN_ERR "ep_release: unexpected pipenum (%d)\n",
			info->pipe);
}

static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);

	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);

		ndelay(450);

		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}

static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
	ep->fifotrn = 0;

	ep->pipectr = get_pipectr_addr(pipenum);
	ep->pipenum = pipenum;
	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}

static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (pipenum == 0)
		return;

	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}

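/*
 * Pick a hardware pipe for the endpoint based on its transfer type.
 * Bulk endpoints fall back to a spare isochronous pipe when all bulk
 * pipes are in use; the per-type counters track how many of each kind
 * have been handed out.
 */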
static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;
	int ret;

	ep->desc = desc;

	if (ep->pipenum)	/* already allocated pipe */
		return 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				printk(KERN_ERR "bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			printk(KERN_ERR "interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			printk(KERN_ERR "isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		printk(KERN_ERR "unexpected xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;

	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	info.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;

	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		printk(KERN_ERR "pipe_buffer_setting fail\n");
		return ret;
	}

	(*counter)++;
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;

	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);

	return 0;
}

static int free_pipe_config(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;

	info.pipe = ep->pipenum;
	info.type = ep->type;
	pipe_buffer_release(r8a66597, &info);
	r8a66597_ep_release(ep);

	return 0;
}

/*-------------------------------------------------------------------------*/
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}

static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}

/* if ccpl is set, the gadget driver's complete function is not called */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}

static void start_ep0_write(struct r8a66597_ep *ep,
		struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}

static void start_packet_write(struct r8a66597_ep *ep,
		struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;

	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);

	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0))
		pipe_irq_enable(r8a66597, ep->pipenum);
	else
		irq_packet_write(ep, req);
}

static void start_packet_read(struct r8a66597_ep *ep,
		struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		if (ep->use_dma) {
			r8a66597_bset(r8a66597, TRCLR, ep->fifosel);
			pipe_change(r8a66597, pipenum);
			r8a66597_bset(r8a66597, TRENB, ep->fifosel);
			r8a66597_write(r8a66597,
				(req->req.length + ep->ep.maxpacket - 1)
					/ ep->ep.maxpacket,
				ep->fifotrn);
		}
		pipe_start(r8a66597, pipenum);	/* trigger once */
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	if (ep->desc->bEndpointAddress & USB_DIR_IN)
		start_packet_write(ep, req);
	else
		start_packet_read(ep, req);
}

static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;

	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;

	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;

	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "start_ep0: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

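/*
 * Bring the controller out of reset.  The external-chip variant also has
 * to program the bus interface pins (PINCFG), select the crystal, and wait
 * for the oscillator and PLL to settle before the internal clock (SCKE)
 * is enabled; the on-chip variant can enable its clocks directly.
 */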
static void init_controller(struct r8a66597 *r8a66597)
{
	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;

	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, 0x04, SYSCFG1);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	} else {
		r8a66597_bset(r8a66597, vif | endian, PINCFG);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
				XTAL, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, XCKE, SYSCFG0);

		msleep(3);

		r8a66597_bset(r8a66597, PLLC, SYSCFG0);

		msleep(1);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	}
}

static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);

	} else {
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}

static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
{
	u16 tmp;

	if (!r8a66597->pdata->on_chip) {
		tmp = r8a66597_read(r8a66597, SYSCFG0);
		if (!(tmp & XCKE))
			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
	}
}

static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}

/*-------------------------------------------------------------------------*/
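/*
 * Finish the request at the head of the endpoint queue: remove it, set its
 * status, and call the gadget driver's completion callback with the
 * controller lock dropped.  If more requests are queued, the next one is
 * started before returning.
 */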
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;

	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}

	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	if (!list_empty(&ep->queue))
		restart = 1;

	spin_unlock(&ep->r8a66597->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);

	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->desc)
			start_packet(ep, req);
	}
}

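/*
 * Write the next chunk of an ep0 IN transfer into the control FIFO.  The
 * FIFO is polled for FRDY with a bounded busy-wait; a short or zero-length
 * chunk marks the buffer valid (BVAL) so the hardware sends a short packet.
 */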
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);

	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			printk(KERN_ERR "pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}

static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "write fifo not ready. pipenum=%d\n", pipenum);
		return;
	}

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep->fifoaddr, buf, size);
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}

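/*
 * Drain one packet from the pipe's FIFO into the current request.  The
 * receive length (DTLN) decides how much to copy; a short or zero-length
 * packet, or reaching the requested length, finishes the transfer.
 */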
static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		printk(KERN_ERR "read fifo not ready");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);

	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}

	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);

	}

	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}

static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}

static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}

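/*
 * Answer a standard GET_STATUS request by queueing a two-byte reply on
 * ep0 through the driver's internal ep0_req.  Endpoint status reports
 * the halt bit when the pipe's PID is currently set to STALL.
 */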
static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}

	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
	spin_lock(&r8a66597->lock);
}

static void clear_feature(struct r8a66597 *r8a66597,
		struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stop(r8a66597, ep->pipenum);
		control_reg_sqclr(r8a66597, ep->pipenum);

		control_end(r8a66597, 1);

		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(r8a66597, ep->pipenum);

		control_end(r8a66597, 1);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

/* if return value is true, call class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 *p = (u16 *)ctrl;
	unsigned long offset = USBREQ;
	int i, ret = 0;

	/* read fifo */
	r8a66597_write(r8a66597, ~VALID, INTSTS0);

	for (i = 0; i < 4; i++)
		p[i] = r8a66597_read(r8a66597, offset + i*2);

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(r8a66597, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(r8a66597, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(r8a66597, ctrl);
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;
	return ret;
}

static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
{
	u16 speed = get_usb_speed(r8a66597);

	switch (speed) {
	case HSMODE:
		r8a66597->gadget.speed = USB_SPEED_HIGH;
		break;
	case FSMODE:
		r8a66597->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
		printk(KERN_ERR "USB speed unknown\n");
	}
}

static void irq_device_state(struct r8a66597 *r8a66597)
{
	u16 dvsq;

	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);

	if (dvsq == DS_DFLT) {
		/* bus reset */
		r8a66597->driver->disconnect(&r8a66597->gadget);
		r8a66597_update_usb_speed(r8a66597);
	}
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);

	r8a66597->old_dvsq = dvsq;
}

static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;

	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);

	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
		}
		break;

	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
					< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		}
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		printk(KERN_ERR "ctrl_stage: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

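/*
 * Top-level interrupt handler.  The current CFIFO pipe selection is saved
 * and restored around the handler, VBUS changes kick off the sampling
 * timer, and the BRDY/BEMP/CTRT sources are dispatched to the pipe and
 * control-stage handlers.
 */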
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 brdysts, nrdysts, bempsts;
	u16 brdyenb, nrdyenb, bempenb;
	u16 savepipe;
	u16 mask0;

	spin_lock(&r8a66597->lock);

	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);

	savepipe = r8a66597_read(r8a66597, CFIFOSEL);

	mask0 = intsts0 & intenb0;
	if (mask0) {
		brdysts = r8a66597_read(r8a66597, BRDYSTS);
		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
		bempsts = r8a66597_read(r8a66597, BEMPSTS);
		brdyenb = r8a66597_read(r8a66597, BRDYENB);
		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
		bempenb = r8a66597_read(r8a66597, BEMPENB);

		if (mask0 & VBINT) {
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);

			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;

			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);

		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);

		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	}

	r8a66597_write(r8a66597, savepipe, CFIFOSEL);

	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
}

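/*
 * VBUS debounce: re-read VBSTS every 50 ms until it has held the same
 * value for R8A66597_MAX_SAMPLING samples, then report a connect or
 * disconnect accordingly.
 */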
static void r8a66597_timer(unsigned long _r8a66597)
{
	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
	unsigned long flags;
	u16 tmp;

	spin_lock_irqsave(&r8a66597->lock, flags);
	tmp = r8a66597_read(r8a66597, SYSCFG0);
	if (r8a66597->scount > 0) {
		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
		if (tmp == r8a66597->old_vbus) {
			r8a66597->scount--;
			if (r8a66597->scount == 0) {
				if (tmp == VBSTS)
					r8a66597_usb_connect(r8a66597);
				else
					r8a66597_usb_disconnect(r8a66597);
			} else {
				mod_timer(&r8a66597->timer,
						jiffies + msecs_to_jiffies(50));
			}
		} else {
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			r8a66597->old_vbus = tmp;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
	}
	spin_unlock_irqrestore(&r8a66597->lock, flags);
}

/*-------------------------------------------------------------------------*/
static int r8a66597_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597_ep *ep;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	return alloc_pipe_config(ep, desc);
}

static int r8a66597_disable(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	BUG_ON(!ep);

	while (!list_empty(&ep->queue)) {
		req = get_request_from_ep(ep);
		spin_lock_irqsave(&ep->r8a66597->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	}

	pipe_irq_disable(ep->r8a66597, ep->pipenum);
	return free_pipe_config(ep);
}

static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct r8a66597_request *req;

	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_request *req;

	req = container_of(_req, struct r8a66597_request, req);
	kfree(req);
}

static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);

	if (list_empty(&ep->queue))
		request = 1;

	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (ep->desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		if (request && !ep->busy)
			start_packet(ep, req);
	}

	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = get_request_from_ep(ep);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}
	if (value) {
		ep->busy = 1;
		pipe_stall(ep->r8a66597, ep->pipenum);
	} else {
		ep->busy = 0;
		pipe_stop(ep->r8a66597, ep->pipenum);
	}

out:
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return ret;
}

static void r8a66597_fifo_flush(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}

static struct usb_ep_ops r8a66597_ep_ops = {
	.enable		= r8a66597_enable,
	.disable	= r8a66597_disable,

	.alloc_request	= r8a66597_alloc_request,
	.free_request	= r8a66597_free_request,

	.queue		= r8a66597_queue,
	.dequeue	= r8a66597_dequeue,

	.set_halt	= r8a66597_set_halt,
	.fifo_flush	= r8a66597_fifo_flush,
};

/*-------------------------------------------------------------------------*/
static struct r8a66597 *the_controller;

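/*
 * Bind a gadget driver to the single controller instance.  If VBUS is
 * already present at registration time, start the sampling timer so the
 * connect is reported once the level has been debounced.
 */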
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = the_controller;
	int retval;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !driver->bind
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;
	if (r8a66597->driver)
		return -EBUSY;

	/* hook up the driver */
	driver->driver.bus = NULL;
	r8a66597->driver = driver;
	r8a66597->gadget.dev.driver = &driver->driver;

	retval = device_add(&r8a66597->gadget.dev);
	if (retval) {
		printk(KERN_ERR "device_add error (%d)\n", retval);
		goto error;
	}

	retval = driver->bind(&r8a66597->gadget);
	if (retval) {
		printk(KERN_ERR "bind to driver error (%d)\n", retval);
		device_del(&r8a66597->gadget.dev);
		goto error;
	}

	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
					INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	}

	return 0;

error:
	r8a66597->driver = NULL;
	r8a66597->gadget.dev.driver = NULL;

	return retval;
}
EXPORT_SYMBOL(usb_gadget_register_driver);

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = the_controller;
	unsigned long flags;

	if (driver != r8a66597->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (r8a66597->gadget.speed != USB_SPEED_UNKNOWN)
		r8a66597_usb_disconnect(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	r8a66597_bclr(r8a66597, VBSE, INTENB0);

	driver->unbind(&r8a66597->gadget);

	init_controller(r8a66597);
	disable_controller(r8a66597);

	device_del(&r8a66597->gadget.dev);
	r8a66597->driver = NULL;
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);

/*-------------------------------------------------------------------------*/
static int r8a66597_get_frame(struct usb_gadget *_gadget)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}

static struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame	= r8a66597_get_frame,
};

static int __exit r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);

	del_timer_sync(&r8a66597->timer);
	iounmap((void *)r8a66597->reg);
	free_irq(platform_get_irq(pdev, 0), r8a66597);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
	kfree(r8a66597);
	return 0;
}

static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}

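/*
 * Platform probe: map the register window, take the USB clock on on-chip
 * variants, install the (possibly shared) interrupt handler, set up the
 * endpoint array and the internal ep0 request, and finally initialize the
 * controller hardware.
 */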
static int __init r8a66597_probe(struct platform_device *pdev)
{
#ifdef CONFIG_HAVE_CLK
	char clk_name[8];
#endif
	struct resource *res, *ires;
	int irq;
	void __iomem *reg = NULL;
	struct r8a66597 *r8a66597 = NULL;
	int ret = 0;
	int i;
	unsigned long irq_trigger;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		printk(KERN_ERR "platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	irq = ires->start;
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;

	if (irq < 0) {
		ret = -ENODEV;
		printk(KERN_ERR "platform_get_irq error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		printk(KERN_ERR "ioremap error.\n");
		goto clean_up;
	}

	/* initialize ucd */
	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
	if (r8a66597 == NULL) {
		ret = -ENOMEM;
		printk(KERN_ERR "kzalloc error\n");
		goto clean_up;
	}

	spin_lock_init(&r8a66597->lock);
	dev_set_drvdata(&pdev->dev, r8a66597);
	r8a66597->pdata = pdev->dev.platform_data;
	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;

	r8a66597->gadget.ops = &r8a66597_gadget_ops;
	device_initialize(&r8a66597->gadget.dev);
	dev_set_name(&r8a66597->gadget.dev, "gadget");
	r8a66597->gadget.is_dualspeed = 1;
	r8a66597->gadget.dev.parent = &pdev->dev;
	r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
	r8a66597->gadget.dev.release = pdev->dev.release;
	r8a66597->gadget.name = udc_name;

	init_timer(&r8a66597->timer);
	r8a66597->timer.function = r8a66597_timer;
	r8a66597->timer.data = (unsigned long)r8a66597;
	r8a66597->reg = (unsigned long)reg;

	r8a66597->bi_bufnum = R8A66597_BASE_BUFNUM;

#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
		r8a66597->clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(r8a66597->clk)) {
			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
				clk_name);
			ret = PTR_ERR(r8a66597->clk);
			goto clean_up;
		}
		clk_enable(r8a66597->clk);
	}
#endif

	disable_controller(r8a66597); /* make sure controller is disabled */

	ret = request_irq(irq, r8a66597_irq, IRQF_DISABLED | IRQF_SHARED,
			udc_name, r8a66597);
	if (ret < 0) {
		printk(KERN_ERR "request_irq error (%d)\n", ret);
		goto clean_up2;
	}

	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
		struct r8a66597_ep *ep = &r8a66597->ep[i];

		if (i != 0) {
			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
			list_add_tail(&r8a66597->ep[i].ep.ep_list,
					&r8a66597->gadget.ep_list);
		}
		ep->r8a66597 = r8a66597;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = r8a66597_ep_name[i];
		ep->ep.ops = &r8a66597_ep_ops;
		ep->ep.maxpacket = 512;
	}
	r8a66597->ep[0].ep.maxpacket = 64;
	r8a66597->ep[0].pipenum = 0;
	r8a66597->ep[0].fifoaddr = CFIFO;
	r8a66597->ep[0].fifosel = CFIFOSEL;
	r8a66597->ep[0].fifoctr = CFIFOCTR;
	r8a66597->ep[0].fifotrn = 0;
	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];

	the_controller = r8a66597;

	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
							GFP_KERNEL);
	if (r8a66597->ep0_req == NULL) {
		ret = -ENOMEM;
		goto clean_up3;
	}
	r8a66597->ep0_req->complete = nop_completion;

	init_controller(r8a66597);

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
	return 0;

clean_up3:
	free_irq(irq, r8a66597);
clean_up2:
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
clean_up:
	if (r8a66597) {
		if (r8a66597->ep0_req)
			r8a66597_free_request(&r8a66597->ep[0].ep,
						r8a66597->ep0_req);
		kfree(r8a66597);
	}
	if (reg)
		iounmap(reg);

	return ret;
}

/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
	.remove =	__exit_p(r8a66597_remove),
	.driver	= {
		.name =	(char *) udc_name,
	},
};

static int __init r8a66597_udc_init(void)
{
	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
}
module_init(r8a66597_udc_init);

static void __exit r8a66597_udc_cleanup(void)
{
	platform_driver_unregister(&r8a66597_driver);
}
module_exit(r8a66597_udc_cleanup);

MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");