/*
 * R8A66597 UDC (USB gadget)
 *
 * Copyright (C) 2006-2009 Renesas Solutions Corp.
 *
 * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "r8a66597-udc.h"

#define DRIVER_VERSION	"2009-08-18"

static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
	"ep8", "ep9",
};

static void init_controller(struct r8a66597 *r8a66597);
static void disable_controller(struct r8a66597 *r8a66597);
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req);
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags);

static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status);

/*-------------------------------------------------------------------------*/
static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
{
	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
}

static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);

	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}

static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);

	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);

	disable_controller(r8a66597);
	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}

static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 pid = 0;
	unsigned long offset;

	if (pipenum == 0) {
		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		pid = r8a66597_read(r8a66597, offset) & PID;
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
	}

	return pid;
}

static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
		u16 pid)
{
	unsigned long offset;

	if (pipenum == 0) {
		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_mdfy(r8a66597, pid, PID, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
	}
}

static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}

static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}

static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}

static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 ret = 0;
	unsigned long offset;

	if (pipenum == 0) {
		ret = r8a66597_read(r8a66597, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		ret = r8a66597_read(r8a66597, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
	}

	return ret;
}

static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0) {
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpected pipe num (%d)\n",
			pipenum);
	}
}

static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;

	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}

	return size;
}

static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip)
		return MBW_32;
	else
		return MBW_16;
}

static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];

	if (ep->use_dma)
		return;

	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);

	ndelay(450);

	r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
}

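/*
 * Carve the pipe's FIFO buffer out of the controller's shared buffer memory:
 * interrupt pipes use a fixed 64-byte single buffer (block 4 onwards), while
 * bulk and isochronous pipes get a 512-byte buffer (double-buffered for bulk)
 * allocated in strides of 16 blocks from R8A66597_BASE_BUFNUM; -ENOMEM is
 * returned once that area would be exhausted.
 */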
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;

	if (info->pipe == 0)
		return -EINVAL;

	r8a66597_write(r8a66597, info->pipe, PIPESEL);

	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;

		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;
		break;
	case R8A66597_ISO:
		bufnum = R8A66597_BASE_BUFNUM +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}

	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
		pr_err("r8a66597 pipe memory is insufficient\n");
		return -ENOMEM;
	}

	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);

	return 0;
}

static void pipe_buffer_release(struct r8a66597 *r8a66597,
				struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;

	if (is_bulk_pipe(info->pipe)) {
		r8a66597->bulk--;
	} else if (is_interrupt_pipe(info->pipe)) {
		r8a66597->interrupt--;
	} else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"ep_release: unexpected pipenum (%d)\n", info->pipe);
	}
}

static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);

	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);

		ndelay(450);

		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}

static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
	ep->fifotrn = 0;

	ep->pipectr = get_pipectr_addr(pipenum);
	ep->pipenum = pipenum;
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}

static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (pipenum == 0)
		return;

	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}

static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;
	int ret;

	ep->desc = desc;

	if (ep->pipenum)	/* already allocated pipe */
		return 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				dev_err(r8a66597_to_dev(r8a66597),
					"bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			dev_err(r8a66597_to_dev(r8a66597),
				"interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			dev_err(r8a66597_to_dev(r8a66597),
				"isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597), "unexpected xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;

	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	info.maxpacket = usb_endpoint_maxp(desc);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;

	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		dev_err(r8a66597_to_dev(r8a66597),
			"pipe_buffer_setting fail\n");
		return ret;
	}

	(*counter)++;
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;

	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);

	return 0;
}

static int free_pipe_config(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;

	info.pipe = ep->pipenum;
	info.type = ep->type;
	pipe_buffer_release(r8a66597, &info);
	r8a66597_ep_release(ep);

	return 0;
}

/*-------------------------------------------------------------------------*/
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}

static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}

/* if ccpl is set, the gadget driver's complete function is not called */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}

static void start_ep0_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}

static void start_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;

	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);

	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0))
		pipe_irq_enable(r8a66597, ep->pipenum);
	else
		irq_packet_write(ep, req);
}

static void start_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		if (ep->use_dma) {
			r8a66597_bset(r8a66597, TRCLR, ep->fifosel);
			pipe_change(r8a66597, pipenum);
			r8a66597_bset(r8a66597, TRENB, ep->fifosel);
			r8a66597_write(r8a66597,
				(req->req.length + ep->ep.maxpacket - 1)
					/ ep->ep.maxpacket,
				ep->fifotrn);
		}
		pipe_start(r8a66597, pipenum);	/* trigger once */
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	if (ep->desc->bEndpointAddress & USB_DIR_IN)
		start_packet_write(ep, req);
	else
		start_packet_read(ep, req);
}

static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;

	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;

	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;

	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(ep->r8a66597),
			"start_ep0: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

static void init_controller(struct r8a66597 *r8a66597)
{
	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;

	if (r8a66597->pdata->on_chip) {
		if (r8a66597->pdata->buswait)
			r8a66597_write(r8a66597, r8a66597->pdata->buswait,
					SYSCFG1);
		else
			r8a66597_write(r8a66597, 0x0f, SYSCFG1);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	} else {
		r8a66597_bset(r8a66597, vif | endian, PINCFG);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
				XTAL, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, XCKE, SYSCFG0);

		msleep(3);

		r8a66597_bset(r8a66597, PLLC, SYSCFG0);

		msleep(1);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	}
}

static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
		r8a66597_bclr(r8a66597, UTST, TESTMODE);

		/* disable interrupts */
		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);
		r8a66597_write(r8a66597, 0, BRDYENB);
		r8a66597_write(r8a66597, 0, BEMPENB);
		r8a66597_write(r8a66597, 0, NRDYENB);

		/* clear status */
		r8a66597_write(r8a66597, 0, BRDYSTS);
		r8a66597_write(r8a66597, 0, NRDYSTS);
		r8a66597_write(r8a66597, 0, BEMPSTS);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);

	} else {
		r8a66597_bclr(r8a66597, UTST, TESTMODE);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}

static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
{
	u16 tmp;

	if (!r8a66597->pdata->on_chip) {
		tmp = r8a66597_read(r8a66597, SYSCFG0);
		if (!(tmp & XCKE))
			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
	}
}

static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}

/*-------------------------------------------------------------------------*/
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;

	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}

	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	if (!list_empty(&ep->queue))
		restart = 1;

	spin_unlock(&ep->r8a66597->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);

	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->desc)
			start_packet(ep, req);
	}
}

static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);

	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep, buf, size);
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}

static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597),
			"write fifo not ready. pipenum=%d\n", pipenum);
		return;
	}

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep, buf, size);
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}

static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);

	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}

	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);

	}

	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}

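/*
 * BRDY/BEMP dispatch: pipe 0 has dedicated status bits (BRDY0/BEMP0) and is
 * serviced first; the remaining pipes are scanned bit by bit and handed to
 * the packet read/write handlers of the endpoint bound to that pipe.
 */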
static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}

static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}

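/*
 * GET_STATUS handling: build the two-byte status word (self-powered for the
 * device recipient, halt state derived from the pipe's PID for an endpoint
 * recipient) and queue it on ep0 using the driver's internal ep0_req.
 */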
static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}

	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
	spin_lock(&r8a66597->lock);
}

static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			control_reg_sqclr(r8a66597, ep->pipenum);
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		}

		control_end(r8a66597, 1);

		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 tmp;
	int timeout = 3000;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_DEVICE_TEST_MODE:
			control_end(r8a66597, 1);
			/* Wait for the completion of the status stage */
			do {
				tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
				udelay(1);
			} while (tmp != CS_IDST && timeout-- > 0);

			/* the test selector is the high byte of wIndex */
			if (tmp == CS_IDST)
				r8a66597_bset(r8a66597,
					le16_to_cpu(ctrl->wIndex) >> 8,
					TESTMODE);
			break;
		default:
			pipe_stall(r8a66597, 0);
			break;
		}
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(r8a66597, ep->pipenum);

		control_end(r8a66597, 1);
		}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

/* if return value is true, call class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 *p = (u16 *)ctrl;
	unsigned long offset = USBREQ;
	int i, ret = 0;

	/* read fifo */
	r8a66597_write(r8a66597, ~VALID, INTSTS0);

	for (i = 0; i < 4; i++)
		p[i] = r8a66597_read(r8a66597, offset + i*2);

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(r8a66597, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(r8a66597, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(r8a66597, ctrl);
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;
	return ret;
}

static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
{
	u16 speed = get_usb_speed(r8a66597);

	switch (speed) {
	case HSMODE:
		r8a66597->gadget.speed = USB_SPEED_HIGH;
		break;
	case FSMODE:
		r8a66597->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
	}
}

static void irq_device_state(struct r8a66597 *r8a66597)
{
	u16 dvsq;

	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);

	if (dvsq == DS_DFLT) {
		/* bus reset */
		spin_unlock(&r8a66597->lock);
		r8a66597->driver->disconnect(&r8a66597->gadget);
		spin_lock(&r8a66597->lock);
		r8a66597_update_usb_speed(r8a66597);
	}
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);

	r8a66597->old_dvsq = dvsq;
}

static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;

	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);

	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
	}
		break;

	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
					< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		}
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597),
			"ctrl_stage: unexpected ctsq(%x)\n", ctsq);
		break;
	}
}

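/*
 * Top-level interrupt handler. CFIFOSEL is saved on entry and restored on
 * exit because the BRDY/BEMP service routines switch CURPIPE while moving
 * data through the common FIFO.
 */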
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 brdysts, nrdysts, bempsts;
	u16 brdyenb, nrdyenb, bempenb;
	u16 savepipe;
	u16 mask0;

	spin_lock(&r8a66597->lock);

	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);

	savepipe = r8a66597_read(r8a66597, CFIFOSEL);

	mask0 = intsts0 & intenb0;
	if (mask0) {
		brdysts = r8a66597_read(r8a66597, BRDYSTS);
		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
		bempsts = r8a66597_read(r8a66597, BEMPSTS);
		brdyenb = r8a66597_read(r8a66597, BRDYENB);
		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
		bempenb = r8a66597_read(r8a66597, BEMPENB);

		if (mask0 & VBINT) {
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);

			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;

			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);

		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);

		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	}

	r8a66597_write(r8a66597, savepipe, CFIFOSEL);

	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
}

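/*
 * VBUS debounce timer: re-read VBSTS every 50 ms and only report a connect
 * or disconnect after R8A66597_MAX_SAMPLING consecutive identical readings;
 * any change in the sampled level restarts the count.
 */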
static void r8a66597_timer(unsigned long _r8a66597)
{
	struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
	unsigned long flags;
	u16 tmp;

	spin_lock_irqsave(&r8a66597->lock, flags);
	tmp = r8a66597_read(r8a66597, SYSCFG0);
	if (r8a66597->scount > 0) {
		tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
		if (tmp == r8a66597->old_vbus) {
			r8a66597->scount--;
			if (r8a66597->scount == 0) {
				if (tmp == VBSTS)
					r8a66597_usb_connect(r8a66597);
				else
					r8a66597_usb_disconnect(r8a66597);
			} else {
				mod_timer(&r8a66597->timer,
						jiffies + msecs_to_jiffies(50));
			}
		} else {
			r8a66597->scount = R8A66597_MAX_SAMPLING;
			r8a66597->old_vbus = tmp;
			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
	}
	spin_unlock_irqrestore(&r8a66597->lock, flags);
}

/*-------------------------------------------------------------------------*/
static int r8a66597_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597_ep *ep;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	return alloc_pipe_config(ep, desc);
}

static int r8a66597_disable(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	BUG_ON(!ep);

	while (!list_empty(&ep->queue)) {
		req = get_request_from_ep(ep);
		spin_lock_irqsave(&ep->r8a66597->lock, flags);
		transfer_complete(ep, req, -ECONNRESET);
		spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	}

	pipe_irq_disable(ep->r8a66597, ep->pipenum);
	return free_pipe_config(ep);
}

static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct r8a66597_request *req;

	req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_request *req;

	req = container_of(_req, struct r8a66597_request, req);
	kfree(req);
}

static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int request = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);

	if (list_empty(&ep->queue))
		request = 1;

	list_add_tail(&req->queue, &ep->queue);
	req->req.actual = 0;
	req->req.status = -EINPROGRESS;

	if (ep->desc == NULL)	/* control */
		start_ep0(ep, req);
	else {
		if (request && !ep->busy)
			start_packet(ep, req);
	}

	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = container_of(_req, struct r8a66597_request, req);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue))
		transfer_complete(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return 0;
}

static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	req = get_request_from_ep(ep);

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}
	if (value) {
		ep->busy = 1;
		pipe_stall(ep->r8a66597, ep->pipenum);
	} else {
		ep->busy = 0;
		ep->wedge = 0;
		pipe_stop(ep->r8a66597, ep->pipenum);
	}

out:
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
	return ret;
}

static int r8a66597_set_wedge(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	ep->wedge = 1;
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return usb_ep_set_halt(_ep);
}

static void r8a66597_fifo_flush(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}

static struct usb_ep_ops r8a66597_ep_ops = {
	.enable		= r8a66597_enable,
	.disable	= r8a66597_disable,

	.alloc_request	= r8a66597_alloc_request,
	.free_request	= r8a66597_free_request,

	.queue		= r8a66597_queue,
	.dequeue	= r8a66597_dequeue,

	.set_halt	= r8a66597_set_halt,
	.set_wedge	= r8a66597_set_wedge,
	.fifo_flush	= r8a66597_fifo_flush,
};

/*-------------------------------------------------------------------------*/
static struct r8a66597 *the_controller;

static int r8a66597_start(struct usb_gadget_driver *driver,
			int (*bind)(struct usb_gadget *))
{
	struct r8a66597 *r8a66597 = the_controller;
	int retval;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !bind
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;
	if (r8a66597->driver)
		return -EBUSY;

	/* hook up the driver */
	driver->driver.bus = NULL;
	r8a66597->driver = driver;
	r8a66597->gadget.dev.driver = &driver->driver;

	retval = device_add(&r8a66597->gadget.dev);
	if (retval) {
		dev_err(r8a66597_to_dev(r8a66597), "device_add error (%d)\n",
			retval);
		goto error;
	}

	retval = bind(&r8a66597->gadget);
	if (retval) {
		dev_err(r8a66597_to_dev(r8a66597),
			"bind to driver error (%d)\n", retval);
		device_del(&r8a66597->gadget.dev);
		goto error;
	}

	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
					INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	}

	return 0;

error:
	r8a66597->driver = NULL;
	r8a66597->gadget.dev.driver = NULL;

	return retval;
}

static int r8a66597_stop(struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = the_controller;
	unsigned long flags;

	if (driver != r8a66597->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (r8a66597->gadget.speed != USB_SPEED_UNKNOWN)
		r8a66597_usb_disconnect(r8a66597);
	r8a66597_bclr(r8a66597, VBSE, INTENB0);
	disable_controller(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	driver->unbind(&r8a66597->gadget);

	device_del(&r8a66597->gadget.dev);
	r8a66597->driver = NULL;
	return 0;
}

/*-------------------------------------------------------------------------*/
static int r8a66597_get_frame(struct usb_gadget *_gadget)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}

static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (is_on)
		r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
	else
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	return 0;
}

static struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame	= r8a66597_get_frame,
	.start		= r8a66597_start,
	.stop		= r8a66597_stop,
	.pullup		= r8a66597_pullup,
};

static int __exit r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);

	usb_del_gadget_udc(&r8a66597->gadget);
	del_timer_sync(&r8a66597->timer);
	iounmap(r8a66597->reg);
	free_irq(platform_get_irq(pdev, 0), r8a66597);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
	kfree(r8a66597);
	return 0;
}

static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}

static int __init r8a66597_probe(struct platform_device *pdev)
{
#ifdef CONFIG_HAVE_CLK
	char clk_name[8];
#endif
	struct resource *res, *ires;
	int irq;
	void __iomem *reg = NULL;
	struct r8a66597 *r8a66597 = NULL;
	int ret = 0;
	int i;
	unsigned long irq_trigger;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	irq = ires->start;
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;

	if (irq < 0) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_irq error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap error.\n");
		goto clean_up;
	}

	/* initialize udc */
	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
	if (r8a66597 == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "kzalloc error\n");
		goto clean_up;
	}

	spin_lock_init(&r8a66597->lock);
	dev_set_drvdata(&pdev->dev, r8a66597);
	r8a66597->pdata = pdev->dev.platform_data;
	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;

	r8a66597->gadget.ops = &r8a66597_gadget_ops;
	device_initialize(&r8a66597->gadget.dev);
	dev_set_name(&r8a66597->gadget.dev, "gadget");
	r8a66597->gadget.is_dualspeed = 1;
	r8a66597->gadget.dev.parent = &pdev->dev;
	r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
	r8a66597->gadget.dev.release = pdev->dev.release;
	r8a66597->gadget.name = udc_name;

	init_timer(&r8a66597->timer);
	r8a66597->timer.function = r8a66597_timer;
	r8a66597->timer.data = (unsigned long)r8a66597;
	r8a66597->reg = reg;

#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
		r8a66597->clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(r8a66597->clk)) {
			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
				clk_name);
			ret = PTR_ERR(r8a66597->clk);
			goto clean_up;
		}
		clk_enable(r8a66597->clk);
	}
#endif

	disable_controller(r8a66597); /* make sure controller is disabled */

	ret = request_irq(irq, r8a66597_irq, IRQF_SHARED,
			udc_name, r8a66597);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error (%d)\n", ret);
		goto clean_up2;
	}

	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
		struct r8a66597_ep *ep = &r8a66597->ep[i];

		if (i != 0) {
			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
			list_add_tail(&r8a66597->ep[i].ep.ep_list,
					&r8a66597->gadget.ep_list);
		}
		ep->r8a66597 = r8a66597;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = r8a66597_ep_name[i];
		ep->ep.ops = &r8a66597_ep_ops;
		ep->ep.maxpacket = 512;
	}
	r8a66597->ep[0].ep.maxpacket = 64;
	r8a66597->ep[0].pipenum = 0;
	r8a66597->ep[0].fifoaddr = CFIFO;
	r8a66597->ep[0].fifosel = CFIFOSEL;
	r8a66597->ep[0].fifoctr = CFIFOCTR;
	r8a66597->ep[0].fifotrn = 0;
	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];

	the_controller = r8a66597;

	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
							GFP_KERNEL);
	if (r8a66597->ep0_req == NULL) {
		ret = -ENOMEM;
		goto clean_up3;
	}
	r8a66597->ep0_req->complete = nop_completion;

	ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
	if (ret)
		goto err_add_udc;

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
	return 0;

err_add_udc:
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
clean_up3:
	free_irq(irq, r8a66597);
clean_up2:
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
clean_up:
	if (r8a66597) {
		if (r8a66597->ep0_req)
			r8a66597_free_request(&r8a66597->ep[0].ep,
						r8a66597->ep0_req);
		kfree(r8a66597);
	}
	if (reg)
		iounmap(reg);

	return ret;
}

/*-------------------------------------------------------------------------*/
static struct platform_driver r8a66597_driver = {
	.remove =	__exit_p(r8a66597_remove),
	.driver		= {
		.name =	(char *) udc_name,
	},
};
MODULE_ALIAS("platform:r8a66597_udc");

static int __init r8a66597_udc_init(void)
{
	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
}
module_init(r8a66597_udc_init);

static void __exit r8a66597_udc_cleanup(void)
{
	platform_driver_unregister(&r8a66597_driver);
}
module_exit(r8a66597_udc_cleanup);

MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");