/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a high-speed, DMA-capable USB device controller. Besides ep0 it
 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that UDC is assigned to port 4 by BIOS settings (the port can
 * also be used as a host port) and UOC bits PAD_EN and APU are set (should
 * be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
 * work without updating NET_IP_ALIGN. Alternatively, PIO mode (module param
 * "use_dma=0") can be used with gadget ether.
 */
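
/*
 * Example (illustrative only, not from the original sources): loading the
 * driver in PIO mode for use with gadget ether:
 *
 *	modprobe amd5536udc use_dma=0
 *
 * The parameter name matches the module_param() declarations further below.
 */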

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"


static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);


/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

/* DMA usage flag */
static bool use_dma = 1;
/* packet per buffer dma */
static bool use_dma_ppb = 1;
/* with per descr. update */
static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

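/*
 * Parameter combinations map to the DMA modes reported by print_regs()
 * below: use_dma_ppb alone selects PPBNDU (packet per buffer, no descriptor
 * update), use_dma_ppb plus use_dma_ppb_du selects PPBDU, and
 * use_dma_bufferfill_mode selects BF (buffer fill) mode.
 */
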
/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp = AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc *dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}
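
/*
 * Illustrative example: ep->txfifo points to 32-bit fifo entries and
 * UDC_EPIN_BUFF_SIZE is read in fifo entries (dwords), so an IN endpoint
 * whose two predecessors use 256 entries each ends up with its fifo base
 * at dev->txfifo + 512.
 */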

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

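/*
 * Clear-NAK bookkeeping: if the NAK bit is still set right after the CNAK
 * write, the endpoint is recorded in cnak_pending so the clear can be
 * retried later; on success its bit is removed from the pending mask.
 */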
static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep *ep;
	struct udc *dev;
	u32 tmp;
	unsigned long iflags;
	u8 udc_csr_epix;
	unsigned maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32 tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep *ep = NULL;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request *req;
	struct udc_data_dma *dma_desc;
	struct udc_ep *ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		pci_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}

/* Init BNA (Buffer Not Available) dummy descriptor: HOST BUSY, self-linked */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}
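
/*
 * The BNA (Buffer Not Available) dummy is a single descriptor chained to
 * itself. It gives the controller a harmless DMA target: udc_dequeue()
 * points the endpoint at it when cancelling an OUT transfer, and
 * udc_queue() copies the current descriptor into it after a BNA event.
 */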

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8 *req_buf;
	u32 *buf;
	int i, j;
	unsigned bytes = 0;
	unsigned remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
			ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
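
/*
 * Worked example for the split above (UDC_DWORD_BYTES is 4): a 13-byte
 * packet is pushed as three 32-bit writel() accesses followed by one
 * writeb() that shifts the trailing byte out of the last, partially used
 * dword.
 */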

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}
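
/*
 * Mirror of udc_txfifo_write(): a trailing partial dword is fetched with a
 * single readl() and unpacked byte by byte, so the rx fifo is always
 * drained in whole 32-bit accesses.
 */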

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma *td = NULL;
	struct udc_data_dma *last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
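
/*
 * Chain layout sketch (packet-per-buffer mode): an N-byte request uses
 * ceil(N / maxpacket) descriptors. The first one is embedded in the
 * request, each following td covers one maxpacket-sized slice of
 * req->req.dma, and only the final td carries the L (last) bit.
 */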

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int retval = 0;
	u32 tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);


		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}
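
/*
 * Buffer-state handshake used above: IN descriptors are left HOST BUSY and
 * only flipped to HOST READY by udc_queue() right before DMA is enabled,
 * while OUT descriptors are set HOST READY here and NAK is cleared so the
 * host may start transmitting.
 */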

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc *dev;
	unsigned halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
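
/*
 * Locking note: the device lock is dropped around
 * usb_gadget_giveback_request() because the gadget driver's completion
 * handler may call back into this driver (e.g. to queue the next request);
 * ep->halted is forced to 1 for the duration of the callback.
 */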

/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{

	int ret_val = 0;
	struct udc_data_dma *td;
	struct udc_data_dma *td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {

		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma *td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
		td = phys_to_virt(td->next);

	return td;

}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma *td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
					UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;

}

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int retval = 0;
	u8 open_rxfifo = 0;
	unsigned long iflags;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc *dev;
	u32 tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned halted;
	unsigned long iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
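
/*
 * Cancellation rule used above: an OUT descriptor still in HOST READY
 * state has not been touched by the controller and can be replaced by the
 * self-linked BNA dummy at once; any other state defers the cancel to the
 * ISR via ep->cancel_transfer.
 */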

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep *ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
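
/*
 * The poll timer armed above periodically re-checks the endpoint's S bit
 * (see udc_pollstall_timer_function() further below): once the stall has
 * been cleared, the handler finishes the CNAK handshake and drops the
 * halted flag.
 */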

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
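
/*
 * The back-to-back register writes above pulse the RES (resume) bit: the
 * first write starts resume signalling towards the host, the second ends
 * the pulse. This is how the controller requests remote wakeup.
 */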

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};

/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;

		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;

			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
						UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
						UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
						UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
						UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}

/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);

	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);


	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}
1751
1752/* Reset the UDC core */
1753static void udc_soft_reset(struct udc *dev)
1754{
1755 unsigned long flags;
1756
1757 DBG(dev, "Soft reset\n");
1758 /*
1759 * reset possible waiting interrupts, because int.
1760 * status is lost after soft reset,
1761 * ep int. status reset
1762 */
1763 writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1764 /* device int. status reset */
1765 writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1766
1767 spin_lock_irqsave(&udc_irq_spinlock, flags);
1768 writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
1769 readl(&dev->regs->cfg);
1770 spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1771
1772}
1773
1774/* RDE timer callback to set RDE bit */
1775static void udc_timer_function(unsigned long v)
1776{
1777 u32 tmp;
1778
1779 spin_lock_irq(&udc_irq_spinlock);
1780
1781 if (set_rde > 0) {
1782 /*
1783 * open the fifo if fifo was filled on last timer call
1784 * conditionally
1785 */
1786 if (set_rde > 1) {
1787 /* set RDE to receive setup data */
1788 tmp = readl(&udc->regs->ctl);
1789 tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1790 writel(tmp, &udc->regs->ctl);
1791 set_rde = -1;
1792 } else if (readl(&udc->regs->sts)
1793 & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1794 /*
1795 * if fifo empty setup polling, do not just
1796 * open the fifo
1797 */
1798 udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1799			if (!stop_timer)
1800				add_timer(&udc_timer);
1801		} else {
1802			/*
1803			 * fifo contains data now: set up the timer so the
1804			 * fifo is opened on expiry and setup packets can be
1805			 * received; when data packets get queued by the
1806			 * gadget layer, the timer is forced to expire with
1807			 * set_rde=0 (RDE is set in udc_queue())
1808			 */
1809			set_rde++;
1810			/* debug: lhadmot_timer_start = 221070 */
1811			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1812			if (!stop_timer)
1813				add_timer(&udc_timer);
1814		}
1815
1816 } else
1817 set_rde = -1; /* RDE was set by udc_queue() */
1818 spin_unlock_irq(&udc_irq_spinlock);
1819 if (stop_timer)
1820 complete(&on_exit);
1821
1822}
1823
1824/* Handle halt state, used in stall poll timer */
1825static void udc_handle_halt_state(struct udc_ep *ep)
1826{
1827 u32 tmp;
1828	/* re-set stall as long as the endpoint is halted */
1829 if (ep->halted == 1) {
1830 tmp = readl(&ep->regs->ctl);
1831 /* STALL cleared ? */
1832 if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1833 /*
1834 * FIXME: MSC spec requires that stall remains
1835			 * even on receiving of CLEAR_FEATURE HALT. So
1836 * we would set STALL again here to be compliant.
1837 * But with current mass storage drivers this does
1838 * not work (would produce endless host retries).
1839 * So we clear halt on CLEAR_FEATURE.
1840 *
1841 DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1842 tmp |= AMD_BIT(UDC_EPCTL_S);
1843 writel(tmp, &ep->regs->ctl);*/
1844
1845 /* clear NAK by writing CNAK */
1846 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1847 writel(tmp, &ep->regs->ctl);
1848 ep->halted = 0;
1849 UDC_QUEUE_CNAK(ep, ep->num);
1850 }
1851 }
1852}
1853
1854/* Stall timer callback to poll S bit and set it again after */
1855static void udc_pollstall_timer_function(unsigned long v)
1856{
1857 struct udc_ep *ep;
1858 int halted = 0;
1859
1860 spin_lock_irq(&udc_stall_spinlock);
1861 /*
1862	 * only one IN and one OUT endpoint are handled
1863 * IN poll stall
1864 */
1865 ep = &udc->ep[UDC_EPIN_IX];
1866 udc_handle_halt_state(ep);
1867 if (ep->halted)
1868 halted = 1;
1869 /* OUT poll stall */
1870 ep = &udc->ep[UDC_EPOUT_IX];
1871 udc_handle_halt_state(ep);
1872 if (ep->halted)
1873 halted = 1;
1874
1875 /* setup timer again when still halted */
1876 if (!stop_pollstall_timer && halted) {
1877 udc_pollstall_timer.expires = jiffies +
1878 HZ * UDC_POLLSTALL_TIMER_USECONDS
1879 / (1000 * 1000);
1880 add_timer(&udc_pollstall_timer);
1881 }
1882 spin_unlock_irq(&udc_stall_spinlock);
1883
1884 if (stop_pollstall_timer)
1885 complete(&on_pollstall_exit);
1886}
1887
1888/* Inits endpoint 0 so that SETUP packets are processed */
1889static void activate_control_endpoints(struct udc *dev)
1890{
1891 u32 tmp;
1892
1893 DBG(dev, "activate_control_endpoints\n");
1894
1895 /* flush fifo */
1896 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1897 tmp |= AMD_BIT(UDC_EPCTL_F);
1898 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1899
1900 /* set ep0 directions */
1901 dev->ep[UDC_EP0IN_IX].in = 1;
1902 dev->ep[UDC_EP0OUT_IX].in = 0;
1903
1904 /* set buffer size (tx fifo entries) of EP0_IN */
1905 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1906 if (dev->gadget.speed == USB_SPEED_FULL)
1907 tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1908 UDC_EPIN_BUFF_SIZE);
1909 else if (dev->gadget.speed == USB_SPEED_HIGH)
1910 tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1911 UDC_EPIN_BUFF_SIZE);
1912 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1913
1914 /* set max packet size of EP0_IN */
1915 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1916 if (dev->gadget.speed == USB_SPEED_FULL)
1917 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1918 UDC_EP_MAX_PKT_SIZE);
1919 else if (dev->gadget.speed == USB_SPEED_HIGH)
1920 tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1921 UDC_EP_MAX_PKT_SIZE);
1922 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1923
1924 /* set max packet size of EP0_OUT */
1925 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1926 if (dev->gadget.speed == USB_SPEED_FULL)
1927 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1928 UDC_EP_MAX_PKT_SIZE);
1929 else if (dev->gadget.speed == USB_SPEED_HIGH)
1930 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1931 UDC_EP_MAX_PKT_SIZE);
1932 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1933
1934 /* set max packet size of EP0 in UDC CSR */
1935 tmp = readl(&dev->csr->ne[0]);
1936 if (dev->gadget.speed == USB_SPEED_FULL)
1937 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1938 UDC_CSR_NE_MAX_PKT);
1939 else if (dev->gadget.speed == USB_SPEED_HIGH)
1940 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1941 UDC_CSR_NE_MAX_PKT);
1942 writel(tmp, &dev->csr->ne[0]);
1943
1944 if (use_dma) {
1945 dev->ep[UDC_EP0OUT_IX].td->status |=
1946 AMD_BIT(UDC_DMA_OUT_STS_L);
1947 /* write dma desc address */
1948 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1949 &dev->ep[UDC_EP0OUT_IX].regs->subptr);
1950 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1951 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
1952 /* stop RDE timer */
1953 if (timer_pending(&udc_timer)) {
1954 set_rde = 0;
1955 mod_timer(&udc_timer, jiffies - 1);
1956 }
1957 /* stop pollstall timer */
1958		if (timer_pending(&udc_pollstall_timer))
1959			mod_timer(&udc_pollstall_timer, jiffies - 1);
1960		/* enable DMA */
1961 tmp = readl(&dev->regs->ctl);
1962 tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1963 | AMD_BIT(UDC_DEVCTL_RDE)
1964 | AMD_BIT(UDC_DEVCTL_TDE);
1965		if (use_dma_bufferfill_mode)
1966			tmp |= AMD_BIT(UDC_DEVCTL_BF);
1967		else if (use_dma_ppb_du)
1968			tmp |= AMD_BIT(UDC_DEVCTL_DU);
1969		writel(tmp, &dev->regs->ctl);
1970 }
1971
1972 /* clear NAK by writing CNAK for EP0IN */
1973 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1974 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1975 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1976 dev->ep[UDC_EP0IN_IX].naking = 0;
1977 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1978
1979 /* clear NAK by writing CNAK for EP0OUT */
1980 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1981 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1982 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1983 dev->ep[UDC_EP0OUT_IX].naking = 0;
1984 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1985}
1986
1987/* Make endpoint 0 ready for control traffic */
1988static int setup_ep0(struct udc *dev)
1989{
1990 activate_control_endpoints(dev);
1991 /* enable ep0 interrupts */
1992 udc_enable_ep0_interrupts(dev);
1993 /* enable device setup interrupts */
1994 udc_enable_dev_setup_interrupts(dev);
1995
1996 return 0;
1997}
1998
1999/* Called by gadget driver to register itself */
2000static int amd5536_udc_start(struct usb_gadget *g,
2001		struct usb_gadget_driver *driver)
2002{
2003	struct udc *dev = to_amd5536_udc(g);
2004	u32 tmp;
2005
2006	driver->driver.bus = NULL;
2007	dev->driver = driver;
2008
2009	/* Some gadget drivers use both ep0 directions.
2010 * NOTE: to gadget driver, ep0 is just one endpoint...
2011 */
2012 dev->ep[UDC_EP0OUT_IX].ep.driver_data =
2013 dev->ep[UDC_EP0IN_IX].ep.driver_data;
2014
2015	/* get ready for ep0 traffic */
2016 setup_ep0(dev);
2017
2018 /* clear SD */
2019 tmp = readl(&dev->regs->ctl);
2020 tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
2021 writel(tmp, &dev->regs->ctl);
2022
2023 usb_connect(dev);
2024
2025 return 0;
2026}
2027
2028/* shutdown requests and disconnect from gadget */
2029static void
2030shutdown(struct udc *dev, struct usb_gadget_driver *driver)
2031__releases(dev->lock)
2032__acquires(dev->lock)
2033{
2034 int tmp;
2035
2036	/* empty queues and init hardware */
2037	udc_basic_init(dev);
2038
2039	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
2040		empty_req_queue(&dev->ep[tmp]);
2041
2042	udc_setup_endpoints(dev);
2043}
2044
2045/* Called by gadget driver to unregister itself */
2046static int amd5536_udc_stop(struct usb_gadget *g)
2047{
2048	struct udc *dev = to_amd5536_udc(g);
2049	unsigned long flags;
2050	u32 tmp;
2051
2052	spin_lock_irqsave(&dev->lock, flags);
2053	udc_mask_unused_interrupts(dev);
2054	shutdown(dev, NULL);
2055	spin_unlock_irqrestore(&dev->lock, flags);
2056
2057	dev->driver = NULL;
2058
2059 /* set SD */
2060 tmp = readl(&dev->regs->ctl);
2061 tmp |= AMD_BIT(UDC_DEVCTL_SD);
2062 writel(tmp, &dev->regs->ctl);
2063
2064	return 0;
2065}
2066
2067/* Clear pending NAK bits */
2068static void udc_process_cnak_queue(struct udc *dev)
2069{
2070 u32 tmp;
2071 u32 reg;
2072
2073 /* check epin's */
2074 DBG(dev, "CNAK pending queue processing\n");
2075 for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2076 if (cnak_pending & (1 << tmp)) {
2077 DBG(dev, "CNAK pending for ep%d\n", tmp);
2078 /* clear NAK by writing CNAK */
2079 reg = readl(&dev->ep[tmp].regs->ctl);
2080 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2081 writel(reg, &dev->ep[tmp].regs->ctl);
2082 dev->ep[tmp].naking = 0;
2083 UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2084 }
2085 }
2086 /* ... and ep0out */
2087 if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2088 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2089 /* clear NAK by writing CNAK */
2090 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2091 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2092 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2093 dev->ep[UDC_EP0OUT_IX].naking = 0;
2094 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2095 dev->ep[UDC_EP0OUT_IX].num);
2096 }
2097}
2098
2099/* Enabling RX DMA after setup packet */
2100static void udc_ep0_set_rde(struct udc *dev)
2101{
2102 if (use_dma) {
2103 /*
2104		 * only enable RXDMA when no data endpoint is enabled
2105 * or data is queued
2106 */
2107 if (!dev->data_ep_enabled || dev->data_ep_queued) {
2108 udc_set_rde(dev);
2109 } else {
2110			/*
2111			 * set up a timer for enabling RDE (so that RXFIFO
2112			 * DMA is not enabled for data endpoints too early)
2113			 */
2114 if (set_rde != 0 && !timer_pending(&udc_timer)) {
2115 udc_timer.expires =
2116 jiffies + HZ/UDC_RDE_TIMER_DIV;
2117 set_rde = 1;
2118				if (!stop_timer)
2119					add_timer(&udc_timer);
2120			}
2121 }
2122 }
2123}
2124
2125
2126/* Interrupt handler for data OUT traffic */
2127static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2128{
2129 irqreturn_t ret_val = IRQ_NONE;
2130 u32 tmp;
2131 struct udc_ep *ep;
2132 struct udc_request *req;
2133 unsigned int count;
2134 struct udc_data_dma *td = NULL;
2135 unsigned dma_done;
2136
2137 VDBG(dev, "ep%d irq\n", ep_ix);
2138 ep = &dev->ep[ep_ix];
2139
2140 tmp = readl(&ep->regs->sts);
2141 if (use_dma) {
2142 /* BNA event ? */
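		/*
		 * BNA (buffer-not-available) is raised when DMA runs without a
		 * host-ready descriptor; a dummy descriptor (bna_dummy_req) is
		 * parked so that RDE can still open the RX fifo, and the real
		 * descriptor is recovered from the dummy later on.
		 */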
2143 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2144			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2145				ep->num, readl(&ep->regs->desptr));
2146 /* clear BNA */
2147 writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2148 if (!ep->cancel_transfer)
2149 ep->bna_occurred = 1;
2150 else
2151 ep->cancel_transfer = 0;
2152 ret_val = IRQ_HANDLED;
2153 goto finished;
2154 }
2155 }
2156 /* HE event ? */
2157 if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2158		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
2159
2160 /* clear HE */
2161 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2162 ret_val = IRQ_HANDLED;
2163 goto finished;
2164 }
2165
2166 if (!list_empty(&ep->queue)) {
2167
2168 /* next request */
2169 req = list_entry(ep->queue.next,
2170 struct udc_request, queue);
2171 } else {
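		/* no request queued: flag that rx data is waiting in the fifo */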
2172 req = NULL;
2173 udc_rxfifo_pending = 1;
2174 }
2175 VDBG(dev, "req = %p\n", req);
2176 /* fifo mode */
2177 if (!use_dma) {
2178
2179 /* read fifo */
2180 if (req && udc_rxfifo_read(ep, req)) {
2181 ret_val = IRQ_HANDLED;
2182
2183 /* finish */
2184 complete_req(ep, req, 0);
2185 /* next request */
2186 if (!list_empty(&ep->queue) && !ep->halted) {
2187 req = list_entry(ep->queue.next,
2188 struct udc_request, queue);
2189 } else
2190 req = NULL;
2191 }
2192
2193 /* DMA */
2194 } else if (!ep->cancel_transfer && req != NULL) {
2195 ret_val = IRQ_HANDLED;
2196
2197 /* check for DMA done */
2198 if (!use_dma_ppb) {
2199 dma_done = AMD_GETBITS(req->td_data->status,
2200 UDC_DMA_OUT_STS_BS);
2201 /* packet per buffer mode - rx bytes */
2202 } else {
2203 /*
2204 * if BNA occurred then recover desc. from
2205 * BNA dummy desc.
2206 */
2207 if (ep->bna_occurred) {
2208 VDBG(dev, "Recover desc. from BNA dummy\n");
2209 memcpy(req->td_data, ep->bna_dummy_req->td_data,
2210 sizeof(struct udc_data_dma));
2211 ep->bna_occurred = 0;
2212 udc_init_bna_dummy(ep->req);
2213 }
2214 td = udc_get_last_dma_desc(req);
2215 dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2216 }
2217 if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2218 /* buffer fill mode - rx bytes */
2219 if (!use_dma_ppb) {
2220 /* received number bytes */
2221 count = AMD_GETBITS(req->td_data->status,
2222 UDC_DMA_OUT_STS_RXBYTES);
2223 VDBG(dev, "rx bytes=%u\n", count);
2224 /* packet per buffer mode - rx bytes */
2225 } else {
2226 VDBG(dev, "req->td_data=%p\n", req->td_data);
2227 VDBG(dev, "last desc = %p\n", td);
2228 /* received number bytes */
2229 if (use_dma_ppb_du) {
2230 /* every desc. counts bytes */
2231 count = udc_get_ppbdu_rxbytes(req);
2232 } else {
2233 /* last desc. counts bytes */
2234 count = AMD_GETBITS(td->status,
2235 UDC_DMA_OUT_STS_RXBYTES);
2236 if (!count && req->req.length
2237 == UDC_DMA_MAXPACKET) {
2238 /*
2239 * on 64k packets the RXBYTES
2240 * field is zero
2241 */
2242 count = UDC_DMA_MAXPACKET;
2243 }
2244 }
2245 VDBG(dev, "last desc rx bytes=%u\n", count);
2246 }
2247
2248 tmp = req->req.length - req->req.actual;
2249 if (count > tmp) {
2250 if ((tmp % ep->ep.maxpacket) != 0) {
2251 DBG(dev, "%s: rx %db, space=%db\n",
2252 ep->ep.name, count, tmp);
2253 req->req.status = -EOVERFLOW;
2254 }
2255 count = tmp;
2256 }
2257 req->req.actual += count;
2258 req->dma_going = 0;
2259 /* complete request */
2260 complete_req(ep, req, 0);
2261
2262 /* next request */
2263 if (!list_empty(&ep->queue) && !ep->halted) {
2264 req = list_entry(ep->queue.next,
2265 struct udc_request,
2266 queue);
2267 /*
2268 * DMA may be already started by udc_queue()
2269				 * called by the gadget driver's completion
2270 * routine. This happens when queue
2271 * holds one request only.
2272 */
2273 if (req->dma_going == 0) {
2274 /* next dma */
2275 if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2276 goto finished;
2277 /* write desc pointer */
2278 writel(req->td_phys,
2279 &ep->regs->desptr);
2280 req->dma_going = 1;
2281 /* enable DMA */
2282 udc_set_rde(dev);
2283 }
2284 } else {
2285 /*
2286 * implant BNA dummy descriptor to allow
2287 * RXFIFO opening by RDE
2288 */
2289 if (ep->bna_dummy_req) {
2290 /* write desc pointer */
2291 writel(ep->bna_dummy_req->td_phys,
2292 &ep->regs->desptr);
2293 ep->bna_occurred = 0;
2294 }
2295
2296 /*
2297 * schedule timer for setting RDE if queue
2298				 * remains empty, to allow ep0 packets to pass
2299 * through
2300 */
2301 if (set_rde != 0
2302 && !timer_pending(&udc_timer)) {
2303 udc_timer.expires =
2304 jiffies
2305 + HZ*UDC_RDE_TIMER_SECONDS;
2306 set_rde = 1;
2307					if (!stop_timer)
2308						add_timer(&udc_timer);
2309				}
2310 if (ep->num != UDC_EP0OUT_IX)
2311 dev->data_ep_queued = 0;
2312 }
2313
2314 } else {
2315 /*
2316 * RX DMA must be reenabled for each desc in PPBDU mode
2317 * and must be enabled for PPBNDU mode in case of BNA
2318 */
2319 udc_set_rde(dev);
2320 }
2321
2322 } else if (ep->cancel_transfer) {
2323 ret_val = IRQ_HANDLED;
2324 ep->cancel_transfer = 0;
2325 }
2326
2327 /* check pending CNAKS */
2328 if (cnak_pending) {
2329		/* CNAK processing only when rxfifo is empty */
2330		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2331			udc_process_cnak_queue(dev);
2332	}
2333
2334 /* clear OUT bits in ep status */
2335 writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2336finished:
2337 return ret_val;
2338}
2339
2340/* Interrupt handler for data IN traffic */
2341static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2342{
2343 irqreturn_t ret_val = IRQ_NONE;
2344 u32 tmp;
2345 u32 epsts;
2346 struct udc_ep *ep;
2347 struct udc_request *req;
2348 struct udc_data_dma *td;
2349 unsigned dma_done;
2350 unsigned len;
2351
2352 ep = &dev->ep[ep_ix];
2353
2354 epsts = readl(&ep->regs->sts);
2355 if (use_dma) {
2356 /* BNA ? */
2357 if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2358 dev_err(&dev->pdev->dev,
2359				"BNA ep%din occurred - DESPTR = %08lx\n",
2360				ep->num,
2361 (unsigned long) readl(&ep->regs->desptr));
2362
2363 /* clear BNA */
2364 writel(epsts, &ep->regs->sts);
2365 ret_val = IRQ_HANDLED;
2366 goto finished;
2367 }
2368 }
2369 /* HE event ? */
2370 if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2371 dev_err(&dev->pdev->dev,
2372			"HE ep%din occurred - DESPTR = %08lx\n",
2373			ep->num, (unsigned long) readl(&ep->regs->desptr));
2374
2375 /* clear HE */
2376 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2377 ret_val = IRQ_HANDLED;
2378 goto finished;
2379 }
2380
2381 /* DMA completion */
2382 if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2383 VDBG(dev, "TDC set- completion\n");
2384 ret_val = IRQ_HANDLED;
2385 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2386 req = list_entry(ep->queue.next,
2387 struct udc_request, queue);
2388			/*
2389			 * length bytes transferred
2390			 * check dma done of last desc. in PPBDU mode
2391 */
2392 if (use_dma_ppb_du) {
2393 td = udc_get_last_dma_desc(req);
2394 if (td) {
2395 dma_done =
2396 AMD_GETBITS(td->status,
2397 UDC_DMA_IN_STS_BS);
2398 /* don't care DMA done */
2399					req->req.actual = req->req.length;
2400 }
2401			} else {
2402 /* assume all bytes transferred */
2403 req->req.actual = req->req.length;
2404 }
2405
2406			if (req->req.actual == req->req.length) {
2407 /* complete req */
2408 complete_req(ep, req, 0);
2409 req->dma_going = 0;
2410 /* further request available ? */
2411 if (list_empty(&ep->queue)) {
2412 /* disable interrupt */
2413 tmp = readl(&dev->regs->ep_irqmsk);
2414 tmp |= AMD_BIT(ep->num);
2415 writel(tmp, &dev->regs->ep_irqmsk);
2416				}
2417 }
2418 }
2419 ep->cancel_transfer = 0;
2420
2421 }
2422 /*
2423	 * status reg has IN bit set but TDC not set: if TDC was handled,
2424	 * IN must not be handled again (UDC defect)
2425 */
2426 if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2427 && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2428 ret_val = IRQ_HANDLED;
2429 if (!list_empty(&ep->queue)) {
2430 /* next request */
2431 req = list_entry(ep->queue.next,
2432 struct udc_request, queue);
2433 /* FIFO mode */
2434 if (!use_dma) {
2435 /* write fifo */
2436 udc_txfifo_write(ep, &req->req);
2437 len = req->req.length - req->req.actual;
2438				if (len > ep->ep.maxpacket)
2439 len = ep->ep.maxpacket;
2440 req->req.actual += len;
2441				if (req->req.actual == req->req.length
2442 || (len != ep->ep.maxpacket)) {
2443 /* complete req */
2444 complete_req(ep, req, 0);
2445 }
2446 /* DMA */
2447 } else if (req && !req->dma_going) {
2448 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2449 req, req->td_data);
2450 if (req->td_data) {
2451
2452 req->dma_going = 1;
2453
2454 /*
2455 * unset L bit of first desc.
2456 * for chain
2457 */
2458 if (use_dma_ppb && req->req.length >
2459 ep->ep.maxpacket) {
2460 req->td_data->status &=
2461 AMD_CLEAR_BIT(
2462 UDC_DMA_IN_STS_L);
2463 }
2464
2465 /* write desc pointer */
2466 writel(req->td_phys, &ep->regs->desptr);
2467
2468 /* set HOST READY */
2469 req->td_data->status =
2470 AMD_ADDBITS(
2471 req->td_data->status,
2472 UDC_DMA_IN_STS_BS_HOST_READY,
2473 UDC_DMA_IN_STS_BS);
2474
2475 /* set poll demand bit */
2476 tmp = readl(&ep->regs->ctl);
2477 tmp |= AMD_BIT(UDC_EPCTL_P);
2478 writel(tmp, &ep->regs->ctl);
2479 }
2480 }
2481
2482		} else if (!use_dma && ep->in) {
2483 /* disable interrupt */
2484 tmp = readl(
2485 &dev->regs->ep_irqmsk);
2486 tmp |= AMD_BIT(ep->num);
2487 writel(tmp,
2488 &dev->regs->ep_irqmsk);
2489		}
2490 }
2491 /* clear status bits */
2492 writel(epsts, &ep->regs->sts);
2493
2494finished:
2495 return ret_val;
2496
2497}
2498
2499/* Interrupt handler for Control OUT traffic */
2500static irqreturn_t udc_control_out_isr(struct udc *dev)
2501__releases(dev->lock)
2502__acquires(dev->lock)
2503{
2504 irqreturn_t ret_val = IRQ_NONE;
2505 u32 tmp;
2506 int setup_supported;
2507 u32 count;
2508 int set = 0;
2509 struct udc_ep *ep;
2510 struct udc_ep *ep_tmp;
2511
2512 ep = &dev->ep[UDC_EP0OUT_IX];
2513
2514 /* clear irq */
2515 writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2516
2517 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2518 /* check BNA and clear if set */
2519 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2520 VDBG(dev, "ep0: BNA set\n");
2521 writel(AMD_BIT(UDC_EPSTS_BNA),
2522 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2523 ep->bna_occurred = 1;
2524 ret_val = IRQ_HANDLED;
2525 goto finished;
2526 }
2527
2528 /* type of data: SETUP or DATA 0 bytes */
2529 tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2530 VDBG(dev, "data_typ = %x\n", tmp);
2531
2532 /* setup data */
2533 if (tmp == UDC_EPSTS_OUT_SETUP) {
2534 ret_val = IRQ_HANDLED;
2535
2536 ep->dev->stall_ep0in = 0;
2537 dev->waiting_zlp_ack_ep0in = 0;
2538
2539 /* set NAK for EP0_IN */
2540 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2541 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2542 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2543 dev->ep[UDC_EP0IN_IX].naking = 1;
2544 /* get setup data */
2545 if (use_dma) {
2546
2547 /* clear OUT bits in ep status */
2548 writel(UDC_EPSTS_OUT_CLEAR,
2549 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2550
2551 setup_data.data[0] =
2552 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2553 setup_data.data[1] =
2554 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2555 /* set HOST READY */
2556 dev->ep[UDC_EP0OUT_IX].td_stp->status =
2557 UDC_DMA_STP_STS_BS_HOST_READY;
2558 } else {
2559 /* read fifo */
2560 udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2561 }
2562
2563 /* determine direction of control data */
2564 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2565 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2566 /* enable RDE */
2567 udc_ep0_set_rde(dev);
2568 set = 0;
2569 } else {
2570 dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2571 /*
2572 * implant BNA dummy descriptor to allow RXFIFO opening
2573 * by RDE
2574 */
2575 if (ep->bna_dummy_req) {
2576 /* write desc pointer */
2577 writel(ep->bna_dummy_req->td_phys,
2578 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2579 ep->bna_occurred = 0;
2580 }
2581
2582 set = 1;
2583 dev->ep[UDC_EP0OUT_IX].naking = 1;
2584 /*
2585			 * set up a timer for enabling RDE (so that RXFIFO
2586			 * DMA is not enabled for data too early)
2587 */
2588 set_rde = 1;
2589 if (!timer_pending(&udc_timer)) {
2590 udc_timer.expires = jiffies +
2591 HZ/UDC_RDE_TIMER_DIV;
2592				if (!stop_timer)
2593					add_timer(&udc_timer);
2594			}
2595 }
2596
2597 /*
2598 * mass storage reset must be processed here because
2599 * next packet may be a CLEAR_FEATURE HALT which would not
2600 * clear the stall bit when no STALL handshake was received
2601 * before (autostall can cause this)
2602 */
2603 if (setup_data.data[0] == UDC_MSCRES_DWORD0
2604 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2605 DBG(dev, "MSC Reset\n");
2606 /*
2607 * clear stall bits
2608 * only one IN and OUT endpoints are handled
2609 */
2610 ep_tmp = &udc->ep[UDC_EPIN_IX];
2611 udc_set_halt(&ep_tmp->ep, 0);
2612 ep_tmp = &udc->ep[UDC_EPOUT_IX];
2613 udc_set_halt(&ep_tmp->ep, 0);
2614 }
2615
2616 /* call gadget with setup data received */
2617 spin_unlock(&dev->lock);
2618 setup_supported = dev->driver->setup(&dev->gadget,
2619 &setup_data.request);
2620 spin_lock(&dev->lock);
2621
2622 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2623 /* ep0 in returns data (not zlp) on IN phase */
2624 if (setup_supported >= 0 && setup_supported <
2625 UDC_EP0IN_MAXPACKET) {
2626 /* clear NAK by writing CNAK in EP0_IN */
2627 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2628 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2629 dev->ep[UDC_EP0IN_IX].naking = 0;
2630 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2631
2632 /* if unsupported request then stall */
2633 } else if (setup_supported < 0) {
2634 tmp |= AMD_BIT(UDC_EPCTL_S);
2635 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2636 } else
2637 dev->waiting_zlp_ack_ep0in = 1;
2638
2639
2640 /* clear NAK by writing CNAK in EP0_OUT */
2641 if (!set) {
2642 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2643 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2644 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2645 dev->ep[UDC_EP0OUT_IX].naking = 0;
2646 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2647 }
2648
2649 if (!use_dma) {
2650 /* clear OUT bits in ep status */
2651 writel(UDC_EPSTS_OUT_CLEAR,
2652 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2653 }
2654
2655 /* data packet 0 bytes */
2656 } else if (tmp == UDC_EPSTS_OUT_DATA) {
2657 /* clear OUT bits in ep status */
2658 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2659
2660 /* get setup data: only 0 packet */
2661 if (use_dma) {
2662 /* no req if 0 packet, just reactivate */
2663 if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2664 VDBG(dev, "ZLP\n");
2665
2666 /* set HOST READY */
2667 dev->ep[UDC_EP0OUT_IX].td->status =
2668 AMD_ADDBITS(
2669 dev->ep[UDC_EP0OUT_IX].td->status,
2670 UDC_DMA_OUT_STS_BS_HOST_READY,
2671 UDC_DMA_OUT_STS_BS);
2672 /* enable RDE */
2673 udc_ep0_set_rde(dev);
2674 ret_val = IRQ_HANDLED;
2675
2676 } else {
2677 /* control write */
2678 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2679 /* re-program desc. pointer for possible ZLPs */
2680 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2681 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2682 /* enable RDE */
2683 udc_ep0_set_rde(dev);
2684 }
2685 } else {
2686
2687 /* received number bytes */
2688 count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2689 count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2690 /* out data for fifo mode not working */
2691 count = 0;
2692
2693 /* 0 packet or real data ? */
2694 if (count != 0) {
2695 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2696 } else {
2697 /* dummy read confirm */
2698 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2699 ret_val = IRQ_HANDLED;
2700 }
2701 }
2702 }
2703
2704 /* check pending CNAKS */
2705 if (cnak_pending) {
2706		/* CNAK processing only when rxfifo is empty */
2707		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2708			udc_process_cnak_queue(dev);
2709	}
2710
2711finished:
2712 return ret_val;
2713}
2714
2715/* Interrupt handler for Control IN traffic */
2716static irqreturn_t udc_control_in_isr(struct udc *dev)
2717{
2718 irqreturn_t ret_val = IRQ_NONE;
2719 u32 tmp;
2720 struct udc_ep *ep;
2721 struct udc_request *req;
2722 unsigned len;
2723
2724 ep = &dev->ep[UDC_EP0IN_IX];
2725
2726 /* clear irq */
2727 writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2728
2729 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2730 /* DMA completion */
2731 if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2732		VDBG(dev, "isr: TDC clear\n");
2733		ret_val = IRQ_HANDLED;
2734
2735 /* clear TDC bit */
2736 writel(AMD_BIT(UDC_EPSTS_TDC),
2737 &dev->ep[UDC_EP0IN_IX].regs->sts);
2738
2739 /* status reg has IN bit set ? */
2740 } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2741 ret_val = IRQ_HANDLED;
2742
2743 if (ep->dma) {
2744 /* clear IN bit */
2745 writel(AMD_BIT(UDC_EPSTS_IN),
2746 &dev->ep[UDC_EP0IN_IX].regs->sts);
2747 }
2748 if (dev->stall_ep0in) {
2749 DBG(dev, "stall ep0in\n");
2750 /* halt ep0in */
2751 tmp = readl(&ep->regs->ctl);
2752 tmp |= AMD_BIT(UDC_EPCTL_S);
2753 writel(tmp, &ep->regs->ctl);
2754 } else {
2755 if (!list_empty(&ep->queue)) {
2756 /* next request */
2757 req = list_entry(ep->queue.next,
2758 struct udc_request, queue);
2759
2760 if (ep->dma) {
2761 /* write desc pointer */
2762 writel(req->td_phys, &ep->regs->desptr);
2763 /* set HOST READY */
2764 req->td_data->status =
2765 AMD_ADDBITS(
2766 req->td_data->status,
2767 UDC_DMA_STP_STS_BS_HOST_READY,
2768 UDC_DMA_STP_STS_BS);
2769
2770 /* set poll demand bit */
2771 tmp =
2772 readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2773 tmp |= AMD_BIT(UDC_EPCTL_P);
2774 writel(tmp,
2775 &dev->ep[UDC_EP0IN_IX].regs->ctl);
2776
2777 /* all bytes will be transferred */
2778 req->req.actual = req->req.length;
2779
2780 /* complete req */
2781 complete_req(ep, req, 0);
2782
2783 } else {
2784 /* write fifo */
2785 udc_txfifo_write(ep, &req->req);
2786
2787				/* length bytes transferred */
2788				len = req->req.length - req->req.actual;
2789 if (len > ep->ep.maxpacket)
2790 len = ep->ep.maxpacket;
2791
2792 req->req.actual += len;
2793 if (req->req.actual == req->req.length
2794 || (len != ep->ep.maxpacket)) {
2795 /* complete req */
2796 complete_req(ep, req, 0);
2797 }
2798 }
2799
2800 }
2801 }
2802 ep->halted = 0;
2803 dev->stall_ep0in = 0;
2804 if (!ep->dma) {
2805 /* clear IN bit */
2806 writel(AMD_BIT(UDC_EPSTS_IN),
2807 &dev->ep[UDC_EP0IN_IX].regs->sts);
2808 }
2809 }
2810
2811 return ret_val;
2812}
2813
2814
2815/* Interrupt handler for global device events */
2816static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2817__releases(dev->lock)
2818__acquires(dev->lock)
2819{
2820 irqreturn_t ret_val = IRQ_NONE;
2821 u32 tmp;
2822 u32 cfg;
2823 struct udc_ep *ep;
2824 u16 i;
2825 u8 udc_csr_epix;
2826
2827 /* SET_CONFIG irq ? */
2828 if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2829 ret_val = IRQ_HANDLED;
2830
2831 /* read config value */
2832 tmp = readl(&dev->regs->sts);
2833 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2834 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2835 dev->cur_config = cfg;
2836 dev->set_cfg_not_acked = 1;
2837
2838 /* make usb request for gadget driver */
2839 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2840 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2841		setup_data.request.wValue = cpu_to_le16(dev->cur_config);
2842
2843		/* program the NE registers */
2844 for (i = 0; i < UDC_EP_NUM; i++) {
2845 ep = &dev->ep[i];
2846 if (ep->in) {
2847
2848 /* ep ix in UDC CSR register space */
2849 udc_csr_epix = ep->num;
2850
2851
2852 /* OUT ep */
2853 } else {
2854 /* ep ix in UDC CSR register space */
2855 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2856 }
2857
2858 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2859 /* ep cfg */
2860 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2861 UDC_CSR_NE_CFG);
2862 /* write reg */
2863 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2864
2865 /* clear stall bits */
2866 ep->halted = 0;
2867 tmp = readl(&ep->regs->ctl);
2868 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2869 writel(tmp, &ep->regs->ctl);
2870 }
2871 /* call gadget zero with setup data received */
2872 spin_unlock(&dev->lock);
2873 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2874 spin_lock(&dev->lock);
2875
2876 } /* SET_INTERFACE ? */
2877 if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2878 ret_val = IRQ_HANDLED;
2879
2880 dev->set_cfg_not_acked = 1;
2881 /* read interface and alt setting values */
2882 tmp = readl(&dev->regs->sts);
2883 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2884 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2885
2886 /* make usb request for gadget driver */
2887 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2888 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2889 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2890		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2891		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2892
2893 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2894 dev->cur_alt, dev->cur_intf);
2895
2896		/* program the NE registers */
2897 for (i = 0; i < UDC_EP_NUM; i++) {
2898 ep = &dev->ep[i];
2899 if (ep->in) {
2900
2901 /* ep ix in UDC CSR register space */
2902 udc_csr_epix = ep->num;
2903
2904
2905 /* OUT ep */
2906 } else {
2907 /* ep ix in UDC CSR register space */
2908 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2909 }
2910
2911 /* UDC CSR reg */
2912 /* set ep values */
2913 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2914 /* ep interface */
2915 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2916 UDC_CSR_NE_INTF);
2917 /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2918 /* ep alt */
2919 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2920 UDC_CSR_NE_ALT);
2921 /* write reg */
2922 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2923
2924 /* clear stall bits */
2925 ep->halted = 0;
2926 tmp = readl(&ep->regs->ctl);
2927 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2928 writel(tmp, &ep->regs->ctl);
2929 }
2930
2931 /* call gadget zero with setup data received */
2932 spin_unlock(&dev->lock);
2933 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2934 spin_lock(&dev->lock);
2935
2936 } /* USB reset */
2937 if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2938 DBG(dev, "USB Reset interrupt\n");
2939 ret_val = IRQ_HANDLED;
2940
2941 /* allow soft reset when suspend occurs */
2942 soft_reset_occured = 0;
2943
2944 dev->waiting_zlp_ack_ep0in = 0;
2945 dev->set_cfg_not_acked = 0;
2946
2947 /* mask not needed interrupts */
2948 udc_mask_unused_interrupts(dev);
2949
2950 /* call gadget to resume and reset configs etc. */
2951 spin_unlock(&dev->lock);
2952 if (dev->sys_suspended && dev->driver->resume) {
2953 dev->driver->resume(&dev->gadget);
2954 dev->sys_suspended = 0;
2955 }
2956		usb_gadget_udc_reset(&dev->gadget, dev->driver);
2957		spin_lock(&dev->lock);
2958
2959 /* disable ep0 to empty req queue */
2960 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2961 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2962
2963 /* soft reset when rxfifo not empty */
2964 tmp = readl(&dev->regs->sts);
2965 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2966 && !soft_reset_after_usbreset_occured) {
2967 udc_soft_reset(dev);
2968 soft_reset_after_usbreset_occured++;
2969 }
2970
2971 /*
2972 * DMA reset to kill potential old DMA hw hang,
2973 * POLL bit is already reset by ep_init() through
2974 * disconnect()
2975 */
2976 DBG(dev, "DMA machine reset\n");
2977 tmp = readl(&dev->regs->cfg);
2978 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2979 writel(tmp, &dev->regs->cfg);
2980
2981 /* put into initial config */
2982 udc_basic_init(dev);
2983
2984 /* enable device setup interrupts */
2985 udc_enable_dev_setup_interrupts(dev);
2986
2987 /* enable suspend interrupt */
2988 tmp = readl(&dev->regs->irqmsk);
2989 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2990 writel(tmp, &dev->regs->irqmsk);
2991
2992 } /* USB suspend */
2993 if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2994 DBG(dev, "USB Suspend interrupt\n");
2995 ret_val = IRQ_HANDLED;
2996 if (dev->driver->suspend) {
2997 spin_unlock(&dev->lock);
2998 dev->sys_suspended = 1;
2999 dev->driver->suspend(&dev->gadget);
3000 spin_lock(&dev->lock);
3001 }
3002 } /* new speed ? */
3003 if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
3004 DBG(dev, "ENUM interrupt\n");
3005 ret_val = IRQ_HANDLED;
3006 soft_reset_after_usbreset_occured = 0;
3007
3008 /* disable ep0 to empty req queue */
3009 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
3010 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3011
3012 /* link up all endpoints */
3013 udc_setup_endpoints(dev);
3014		dev_info(&dev->pdev->dev, "Connect: %s\n",
3015			usb_speed_string(dev->gadget.speed));
3016
3017 /* init ep 0 */
3018 activate_control_endpoints(dev);
3019
3020 /* enable ep0 interrupts */
3021 udc_enable_ep0_interrupts(dev);
3022 }
3023 /* session valid change interrupt */
3024 if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
3025 DBG(dev, "USB SVC interrupt\n");
3026 ret_val = IRQ_HANDLED;
3027
3028 /* check that session is not valid to detect disconnect */
3029 tmp = readl(&dev->regs->sts);
3030 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
3031 /* disable suspend interrupt */
3032 tmp = readl(&dev->regs->irqmsk);
3033 tmp |= AMD_BIT(UDC_DEVINT_US);
3034 writel(tmp, &dev->regs->irqmsk);
3035 DBG(dev, "USB Disconnect (session valid low)\n");
3036 /* cleanup on disconnect */
3037 usb_disconnect(udc);
3038 }
3039
3040 }
3041
3042 return ret_val;
3043}
3044
3045/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3046static irqreturn_t udc_irq(int irq, void *pdev)
3047{
3048 struct udc *dev = pdev;
3049 u32 reg;
3050 u16 i;
3051 u32 ep_irq;
3052 irqreturn_t ret_val = IRQ_NONE;
3053
3054 spin_lock(&dev->lock);
3055
3056 /* check for ep irq */
3057 reg = readl(&dev->regs->ep_irqsts);
3058 if (reg) {
3059 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3060 ret_val |= udc_control_out_isr(dev);
3061 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3062 ret_val |= udc_control_in_isr(dev);
3063
3064 /*
3065 * data endpoint
3066 * iterate ep's
3067 */
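		/* ep irq bits 1..UDC_EPIN_NUM are IN endpoints, higher bits OUT */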
3068 for (i = 1; i < UDC_EP_NUM; i++) {
3069 ep_irq = 1 << i;
3070 if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3071 continue;
3072
3073 /* clear irq status */
3074 writel(ep_irq, &dev->regs->ep_irqsts);
3075
3076 /* irq for out ep ? */
3077 if (i > UDC_EPIN_NUM)
3078 ret_val |= udc_data_out_isr(dev, i);
3079 else
3080 ret_val |= udc_data_in_isr(dev, i);
3081 }
3082
3083 }
3084
3085
3086 /* check for dev irq */
3087 reg = readl(&dev->regs->irqsts);
3088 if (reg) {
3089 /* clear irq */
3090 writel(reg, &dev->regs->irqsts);
3091 ret_val |= udc_dev_isr(dev, reg);
3092 }
3093
3094
3095 spin_unlock(&dev->lock);
3096 return ret_val;
3097}
3098
3099/* Tears down device */
3100static void gadget_release(struct device *pdev)
3101{
3102 struct amd5536udc *dev = dev_get_drvdata(pdev);
3103 kfree(dev);
3104}
3105
3106/* Cleanup on device remove */
3107static void udc_remove(struct udc *dev)
3108{
3109 /* remove timer */
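	/*
	 * raise the stop flag first; a timer run in flight then signals
	 * on_exit from udc_timer_function() before the timer is deleted
	 */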
3110 stop_timer++;
3111 if (timer_pending(&udc_timer))
3112 wait_for_completion(&on_exit);
3113 if (udc_timer.data)
3114 del_timer_sync(&udc_timer);
3115 /* remove pollstall timer */
3116 stop_pollstall_timer++;
3117 if (timer_pending(&udc_pollstall_timer))
3118 wait_for_completion(&on_pollstall_exit);
3119 if (udc_pollstall_timer.data)
3120 del_timer_sync(&udc_pollstall_timer);
3121 udc = NULL;
3122}
3123
3124/* free all the dma pools */
3125static void free_dma_pools(struct udc *dev)
3126{
3127 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3128 dev->ep[UDC_EP0OUT_IX].td_phys);
3129 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3130 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3131 dma_pool_destroy(dev->stp_requests);
3132 dma_pool_destroy(dev->data_requests);
3133}
3134
3135/* Reset all pci context */
3136static void udc_pci_remove(struct pci_dev *pdev)
3137{
3138 struct udc *dev;
3139
3140 dev = pci_get_drvdata(pdev);
3141
3142	usb_del_gadget_udc(&udc->gadget);
3143	/* gadget driver must not be registered */
3144	if (WARN_ON(dev->driver))
3145		return;
3146
3147	/* dma pool cleanup */
3148	free_dma_pools(dev);
3149
3150 /* reset controller */
3151 writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3152	free_irq(pdev->irq, dev);
3153 iounmap(dev->virt_addr);
3154 release_mem_region(pci_resource_start(pdev, 0),
3155 pci_resource_len(pdev, 0));
3156 pci_disable_device(pdev);
3157
3158	udc_remove(dev);
3159}
3160
3161/* create dma pools on init */
3162static int init_dma_pools(struct udc *dev)
3163{
3164 struct udc_stp_dma *td_stp;
3165 struct udc_data_dma *td_data;
3166 int retval;
3167
3168 /* consistent DMA mode setting ? */
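	/*
	 * packet-per-buffer (PPB) and buffer-fill (BF) mode are mutually
	 * exclusive; descriptor update (DU) only applies on top of PPB
	 */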
3169 if (use_dma_ppb) {
3170 use_dma_bufferfill_mode = 0;
3171 } else {
3172 use_dma_ppb_du = 0;
3173 use_dma_bufferfill_mode = 1;
3174 }
3175
3176 /* DMA setup */
3177 dev->data_requests = dma_pool_create("data_requests", NULL,
3178 sizeof(struct udc_data_dma), 0, 0);
3179 if (!dev->data_requests) {
3180 DBG(dev, "can't get request data pool\n");
3181		return -ENOMEM;
3182	}
3183
3184 /* EP0 in dma regs = dev control regs */
3185 dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3186
3187 /* dma desc for setup data */
3188 dev->stp_requests = dma_pool_create("setup requests", NULL,
3189 sizeof(struct udc_stp_dma), 0, 0);
3190 if (!dev->stp_requests) {
3191 DBG(dev, "can't get stp request pool\n");
3192 retval = -ENOMEM;
3193		goto err_create_dma_pool;
3194	}
3195 /* setup */
3196 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3197 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3198 if (td_stp == NULL) {
3199 retval = -ENOMEM;
3200		goto err_alloc_dma;
3201	}
3202 dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3203
3204 /* data: 0 packets !? */
3205 td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3206 &dev->ep[UDC_EP0OUT_IX].td_phys);
3207 if (td_data == NULL) {
3208 retval = -ENOMEM;
3209		goto err_alloc_phys;
3210	}
3211 dev->ep[UDC_EP0OUT_IX].td = td_data;
3212 return 0;
3213
3214err_alloc_phys:
3215 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3216 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3217err_alloc_dma:
3218 dma_pool_destroy(dev->stp_requests);
3219 dev->stp_requests = NULL;
3220err_create_dma_pool:
3221 dma_pool_destroy(dev->data_requests);
3222 dev->data_requests = NULL;
3223	return retval;
3224}
3225
3226/* general probe */
3227static int udc_probe(struct udc *dev)
3228{
3229 char tmp[128];
3230 u32 reg;
3231 int retval;
3232
3233 /* mark timer as not initialized */
3234 udc_timer.data = 0;
3235 udc_pollstall_timer.data = 0;
3236
3237 /* device struct setup */
3238 dev->gadget.ops = &udc_ops;
3239
3240 dev_set_name(&dev->gadget.dev, "gadget");
3241 dev->gadget.name = name;
3242 dev->gadget.max_speed = USB_SPEED_HIGH;
3243
3244 /* init registers, interrupts, ... */
3245 startup_registers(dev);
3246
3247 dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3248
3249 snprintf(tmp, sizeof(tmp), "%d", dev->irq);
3250 dev_info(&dev->pdev->dev,
3251 "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3252 tmp, dev->phys_addr, dev->chiprev,
3253 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3254 strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3255 if (dev->chiprev == UDC_HSA0_REV) {
3256 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3257 retval = -ENODEV;
3258 goto finished;
3259 }
3260 dev_info(&dev->pdev->dev,
3261 "driver version: %s(for Geode5536 B1)\n", tmp);
3262 udc = dev;
3263
3264 retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
3265 gadget_release);
3266 if (retval)
3267 goto finished;
3268
3269 /* timer init */
3270 init_timer(&udc_timer);
3271 udc_timer.function = udc_timer_function;
3272 udc_timer.data = 1;
3273 /* timer pollstall init */
3274 init_timer(&udc_pollstall_timer);
3275 udc_pollstall_timer.function = udc_pollstall_timer_function;
3276 udc_pollstall_timer.data = 1;
3277
3278 /* set SD */
3279 reg = readl(&dev->regs->ctl);
3280 reg |= AMD_BIT(UDC_DEVCTL_SD);
3281 writel(reg, &dev->regs->ctl);
3282
3283 /* print dev register info */
3284 print_regs(dev);
3285
3286 return 0;
3287
3288finished:
3289 return retval;
3290}
3291
3292/* Called by pci bus driver to init pci context */
3293static int udc_pci_probe(
3294 struct pci_dev *pdev,
3295 const struct pci_device_id *id
3296)
3297{
3298 struct udc *dev;
3299 unsigned long resource;
3300 unsigned long len;
3301 int retval = 0;
3302
3303 /* one udc only */
3304 if (udc) {
3305 dev_dbg(&pdev->dev, "already probed\n");
3306 return -EBUSY;
3307 }
3308
3309 /* init */
3310 dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3311	if (!dev)
3312		return -ENOMEM;
3313
3314 /* pci setup */
3315 if (pci_enable_device(pdev) < 0) {
3316 retval = -ENODEV;
3317		goto err_pcidev;
3318	}
3319
3320 /* PCI resource allocation */
3321 resource = pci_resource_start(pdev, 0);
3322 len = pci_resource_len(pdev, 0);
3323
3324 if (!request_mem_region(resource, len, name)) {
3325 dev_dbg(&pdev->dev, "pci device used already\n");
3326 retval = -EBUSY;
3327		goto err_memreg;
3328	}
3329
3330 dev->virt_addr = ioremap_nocache(resource, len);
3331 if (dev->virt_addr == NULL) {
3332 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3333 retval = -EFAULT;
3334		goto err_ioremap;
3335	}
3336
3337 if (!pdev->irq) {
3338		dev_err(&pdev->dev, "irq not set\n");
3339		retval = -ENODEV;
3340		goto err_irq;
3341	}
3342
3343	spin_lock_init(&dev->lock);
3344 /* udc csr registers base */
3345 dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3346 /* dev registers base */
3347 dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3348 /* ep registers base */
3349 dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3350 /* fifo's base */
3351 dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3352 dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3353
3354	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3355		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3356		retval = -EBUSY;
3357		goto err_irq;
3358	}
3359
3360 pci_set_drvdata(pdev, dev);
3361
3362	/* chip revision for Hs AMD5536 */
3363 dev->chiprev = pdev->revision;
3364
3365 pci_set_master(pdev);
3366	pci_try_set_mwi(pdev);
3367
3368	/* init dma pools */
3369 if (use_dma) {
3370 retval = init_dma_pools(dev);
3371 if (retval != 0)
3372			goto err_dma;
3373	}
3374
3375 dev->phys_addr = resource;
3376 dev->irq = pdev->irq;
3377 dev->pdev = pdev;
3378
3379 /* general probing */
3380	if (udc_probe(dev)) {
3381 retval = -ENODEV;
3382 goto err_probe;
3383 }
3384 return 0;
3385
3386err_probe:
3387 if (use_dma)
3388 free_dma_pools(dev);
3389err_dma:
3390 free_irq(pdev->irq, dev);
3391err_irq:
3392 iounmap(dev->virt_addr);
3393err_ioremap:
3394 release_mem_region(resource, len);
3395err_memreg:
3396 pci_disable_device(pdev);
3397err_pcidev:
3398 kfree(dev);
3399	return retval;
3400}
3401
3402/* PCI device parameters */
3403static const struct pci_device_id pci_id[] = {
3404	{
3405 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
3406 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3407 .class_mask = 0xffffffff,
3408 },
3409 {},
3410};
3411MODULE_DEVICE_TABLE(pci, pci_id);
3412
3413/* PCI functions */
3414static struct pci_driver udc_pci_driver = {
3415 .name = (char *) name,
3416 .id_table = pci_id,
3417 .probe = udc_pci_probe,
3418 .remove = udc_pci_remove,
3419};
3420
3421module_pci_driver(udc_pci_driver);
3422
3423MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3424MODULE_AUTHOR("Thomas Dahlmann");
3425MODULE_LICENSE("GPL");
3426