/*
 * Driver for the PLX NET2280 USB device controller.
 * Specs and errata are available from <http://www.plxtech.com>.
 *
 * PLX Technology Inc. (formerly NetChip Technology) supported the
 * development of this driver.
 *
 *
 * CODE STATUS HIGHLIGHTS
 *
 * This driver should work well with most "gadget" drivers, including
 * the File Storage, Serial, and Ethernet/RNDIS gadget drivers
 * as well as Gadget Zero and Gadgetfs.
 *
 * DMA is enabled by default. Drivers using transfer queues might use
 * DMA chaining to remove IRQ latencies between transfers. (Except when
 * short OUT transfers happen.) Drivers can use the req->no_interrupt
 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
 * and DMA chaining is enabled.
 *
 * Note that almost all the errata workarounds here are only needed for
 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
 */

/*
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 PLX Technology, Inc.
 *
 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
 *	with 2282 chip
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#undef	DEBUG		/* messages on error and most fault paths */
#undef	VERBOSE		/* extra debug messages (success too) */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>


#define	DRIVER_DESC		"PLX NET228x USB Peripheral Controller"
#define	DRIVER_VERSION		"2005 Sept 27"

#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
#define	EP_DONTUSE		13	/* nonzero */

#define USE_RDK_LEDS		/* GPIO pins control three LEDs */


static const char driver_name [] = "net2280";
static const char driver_desc [] = DRIVER_DESC;

static const char ep0name [] = "ep0";
static const char *const ep_name [] = {
	ep0name,
	"ep-a", "ep-b", "ep-c", "ep-d",
	"ep-e", "ep-f",
};

/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
 *
 * The net2280 DMA engines are not tightly integrated with their FIFOs;
 * not all cases are (yet) handled well in this driver or the silicon.
 * Some gadget drivers work better with the dma support here than others.
 * These two parameters let you use PIO or more aggressive DMA.
 */
static int use_dma = 1;
static int use_dma_chaining = 0;

/* "modprobe net2280 use_dma=n" etc */
module_param (use_dma, bool, S_IRUGO);
module_param (use_dma_chaining, bool, S_IRUGO);


/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode = 0;

/* "modprobe net2280 fifo_mode=1" etc */
module_param (fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280. Otherwise,
 * USB suspend requests will be ignored. This is acceptable for
 * self-powered devices.
 */
static int enable_suspend = 0;

/* "modprobe net2280 enable_suspend=1" etc */
module_param (enable_suspend, bool, S_IRUGO);


#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")

#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
static char *type_string (u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:	return "bulk";
	case USB_ENDPOINT_XFER_ISOC:	return "iso";
	case USB_ENDPOINT_XFER_INT:	return "intr";
	};
	return "control";
}
#endif

#include "net2280.h"

#define valid_bit	cpu_to_le32 (1 << VALID_BIT)
#define dma_done_ie	cpu_to_le32 (1 << DMA_DONE_INTERRUPT_ENABLE)

/*-------------------------------------------------------------------------*/

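/* endpoint enable: validate the descriptor, program the endpoint's
 * config/response registers, and enable its PIO or DMA interrupt
 * sources (this is the usb_ep_ops.enable entry point).
 */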
static int
net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280 *dev;
	struct net2280_ep *ep;
	u32 max, tmp;
	unsigned long flags;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
		return -EDOM;

	/* sanity check ep-e/ep-f since their fifos are small */
	max = le16_to_cpu (desc->wMaxPacketSize) & 0x1fff;
	if (ep->num > 4 && max > 64)
		return -ERANGE;

	spin_lock_irqsave (&dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_idx_reg (dev->regs, REG_EP_MAXPKT (dev, ep->num), max);

	/* FIFO lines can't go to different packets. PIO is ok, so
	 * use it instead of troublesome (non-bulk) multi-packet DMA.
	 */
	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
		DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
			ep->ep.name, ep->ep.maxpacket);
		ep->dma = NULL;
	}

	/* set type, direction, address; reset fifo counters */
	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (tmp == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100
				&& dev->gadget.speed == USB_SPEED_HIGH
				&& !(desc->bEndpointAddress & USB_DIR_IN))
			writel ((1 << CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH
					&& max != 512)
				|| (dev->gadget.speed == USB_SPEED_FULL
					&& max > 64)) {
			spin_unlock_irqrestore (&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
	tmp <<= ENDPOINT_TYPE;
	tmp |= desc->bEndpointAddress;
	tmp |= (4 << ENDPOINT_BYTE_COUNT);	/* default full fifo lines */
	tmp |= 1 << ENDPOINT_ENABLE;
	wmb ();

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = (tmp & USB_DIR_IN) != 0;
	if (!ep->is_in)
		writel ((1 << SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (dev->pdev->device != 0x2280) {
		/* Added for the 2282: don't use NAK packets on an IN
		 * endpoint; this was ignored on the 2280.
		 */
		writel ((1 << CLEAR_NAK_OUT_PACKETS)
			| (1 << CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	writel (tmp, &ep->regs->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
		writel (tmp, &dev->regs->pciirqenb0);

		tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->pdev->device == 0x2280)
			tmp |= readl (&ep->regs->ep_irqenb);
		writel (tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = (1 << (8 + ep->num));	/* completion */
		tmp |= readl (&dev->regs->pciirqenb1);
		writel (tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel (tmp, &ep->regs->ep_irqenb);

			tmp = (1 << ep->num) | readl (&dev->regs->pciirqenb0);
			writel (tmp, &dev->regs->pciirqenb0);
		}
	}

	tmp = desc->bEndpointAddress;
	DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING (tmp),
		type_string (desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore (&dev->lock, flags);
	return 0;
}

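/* spin (up to "usec" microseconds) until (*ptr & mask) == done;
 * returns 0 on success, -ENODEV if the register reads back as all-ones
 * ("device unplugged"), or -ETIMEDOUT on timeout.
 */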
static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl (ptr);
		if (result == ~(u32)0)		/* "device unplugged" */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay (1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

static const struct usb_ep_ops net2280_ep_ops;

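/* return an endpoint to its power-on state: no descriptor, empty queue,
 * DMA and per-endpoint irqs disabled, default NAK/response modes set,
 * and stale status bits cleared.
 */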
static void ep_reset (struct net2280_regs __iomem *regs, struct net2280_ep *ep)
{
	u32 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD (&ep->queue);

	ep->ep.maxpacket = ~0;
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel (0, &ep->dma->dmactl);
		writel (  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
			| (1 << DMA_TRANSACTION_DONE_INTERRUPT)
			| (1 << DMA_ABORT)
			, &ep->dma->dmastat);

		tmp = readl (&regs->pciirqenb0);
		tmp &= ~(1 << ep->num);
		writel (tmp, &regs->pciirqenb0);
	} else {
		tmp = readl (&regs->pciirqenb1);
		tmp &= ~(1 << (8 + ep->num));		/* completion */
		writel (tmp, &regs->pciirqenb1);
	}
	writel (0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
		tmp = (1 << SET_NAK_OUT_PACKETS_MODE)
			| (1 << SET_NAK_OUT_PACKETS)
			| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
			| (1 << CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = (1 << CLEAR_NAK_OUT_PACKETS_MODE)
			| (1 << CLEAR_NAK_OUT_PACKETS)
			| (1 << CLEAR_EP_HIDE_STATUS_PHASE)
			| (1 << CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= (1 << CLEAR_ENDPOINT_TOGGLE)
			| (1 << CLEAR_ENDPOINT_HALT);
	}
	writel (tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->pdev->device == 0x2280)
		tmp = (1 << FIFO_OVERFLOW)
			| (1 << FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel (tmp | (1 << TIMEOUT)
		| (1 << USB_STALL_SENT)
		| (1 << USB_IN_NAK_SENT)
		| (1 << USB_IN_ACK_RCVD)
		| (1 << USB_OUT_PING_NAK_SENT)
		| (1 << USB_OUT_ACK_SENT)
		| (1 << FIFO_FLUSH)
		| (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
		| (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
		| (1 << DATA_IN_TOKEN_INTERRUPT)
		, &ep->regs->ep_stat);

	/* fifo size is handled separately */
}

static void nuke (struct net2280_ep *);

static int net2280_disable (struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	unsigned long flags;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	nuke (ep);
	ep_reset (ep->dev->regs, ep);

	VDEBUG (ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void) readl (&ep->regs->ep_cfg);

	if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma [ep->num - 1];

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request *
net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_ep *ep;
	struct net2280_request *req;

	if (!_ep)
		return NULL;
	ep = container_of (_ep, struct net2280_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD (&req->queue);

	/* this dma descriptor may be swapped with the previous dummy */
	if (ep->dma) {
		struct net2280_dma *td;

		td = pci_pool_alloc (ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree (req);
			return NULL;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
		td->dmadesc = td->dmaaddr;
		req->td = td;
	}
	return &req->req;
}

static void
net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep *ep;
	struct net2280_request *req;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || !_req)
		return;

	req = container_of (_req, struct net2280_request, req);
	WARN_ON (!list_empty (&req->queue));
	if (req->td)
		pci_pool_free (ep->dev->requests, req->td, req->td_dma);
	kfree (req);
}

/*-------------------------------------------------------------------------*/

/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet. ep-a..ep-d should use dma instead.
 */
static void
write_fifo (struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8 *buf;
	u32 tmp;
	unsigned count, total;

	/* INVARIANT: fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch (buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned ((u32 *)buf);
		cpu_to_le32s (&tmp);
		writel (tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned ((u32 *)buf) : count;
		cpu_to_le32s (&tmp);
		set_fifo_bytecount (ep, count & 0x03);
		writel (tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}

/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE: also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush (struct net2280_ep *ep)
{
	u32 __iomem *statp;
	u32 tmp;

	ASSERT_OUT_NAKING (ep);

	statp = &ep->regs->ep_stat;
	writel (  (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
		| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
		, statp);
	writel ((1 << FIFO_FLUSH), statp);
	mb ();
	tmp = readl (statp);
	if (tmp & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
			/* high speed did bulk NYET; fifo isn't filling */
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake (statp, (1 << USB_OUT_PING_NAK_SENT),
				(1 << USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}

/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int
read_fifo (struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8 *buf = req->req.buf + req->req.actual;
	unsigned count, tmp, is_short;
	unsigned cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected. not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		if ((tmp & (1 << NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & (1 << FIFO_FULL))) {
			start_out_naking (ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw (buf);
	count = readl (&regs->ep_avail);
	if (unlikely (count == 0)) {
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		count = readl (&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & (1 << NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ERROR (ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	while (count >= 4) {
		tmp = readl (&regs->ep_data);
		cpu_to_le32s (&tmp);
		put_unaligned (tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl (&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush (ep);
	if (prevent) {
		writel ((1 << CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl (&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length)
				&& !req->req.zero);
}

/* fill out dma descriptor to match a given request */
static void
fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma *td = req->td;
	u32 dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= (1 << DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
			|| ep->dev->pdev->device != 0x2280)
		dmacount |= (1 << END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= (1 << VALID_BIT);
	if (likely(!req->req.no_interrupt || !use_dma_chaining))
		dmacount |= (1 << DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb ();
	td->dmacount = cpu_to_le32(dmacount);
}

static const u32 dmactl_default =
		  (1 << DMA_SCATTER_GATHER_DONE_INTERRUPT)
		| (1 << DMA_CLEAR_COUNT_ENABLE)
		/* erratum 0116 workaround part 1 (use POLLING) */
		| (POLL_100_USEC << DESCRIPTOR_POLLING_RATE)
		| (1 << DMA_VALID_BIT_POLLING_ENABLE)
		| (1 << DMA_VALID_BIT_ENABLE)
		| (1 << DMA_SCATTER_GATHER_ENABLE)
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		| (1 << DMA_ENABLE);

static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
{
	handshake (&dma->dmactl, (1 << DMA_ENABLE), 0, 50);
}

static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
{
	writel (readl (&dma->dmactl) & ~(1 << DMA_ENABLE), &dma->dmactl);
	spin_stop_dma (dma);
}

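/* hand the dma descriptor at td_dma to the endpoint's dma engine and
 * start it; for OUT endpoints this also stops NAKing so the fifo can
 * fill (erratum 0116 workaround part 3 is applied along the way).
 */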
static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs __iomem *dma = ep->dma;
	unsigned int tmp = (1 << VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (ep->dev->pdev->device != 0x2280)
		tmp |= (1 << END_OF_CHAIN);

	writel (tmp, &dma->dmacount);
	writel (readl (&dma->dmastat), &dma->dmastat);

	writel (td_dma, &dma->dmadesc);
	writel (dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3: pci arbiter away from net2280 */
	(void) readl (&ep->dev->pci->pcimstctl);

	writel ((1 << DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking (ep);
}

static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
{
	u32 tmp;
	struct net2280_dma_regs __iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON (readl (&dma->dmactl) & (1 << DMA_ENABLE));
	writel (0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
				& (1 << NAK_OUT_PACKETS)) != 0) {
		writel ((1 << SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl (&ep->regs->ep_avail);
		if (tmp) {
			writel (readl (&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel (req->req.dma, &dma->dmaaddr);
			tmp = min (tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
			writel ((1 << DMA_DONE_INTERRUPT_ENABLE)
				| tmp, &dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel ((1 << DMA_ENABLE), &dma->dmactl);
			writel ((1 << DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely ((req->req.length % ep->ep.maxpacket) != 0
				|| req->req.zero)) {
			tmp |= (1 << DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc (ep, req, 1);

	if (!use_dma_chaining)
		req->td->dmacount |= cpu_to_le32 (1 << END_OF_CHAIN);

	start_queue (ep, tmp, req->td_dma);
}

static inline void
queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma *end;
	dma_addr_t tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc (ep, req, valid);
}

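/* complete one request: unmap its buffer (if this driver mapped it),
 * record its status, and call the gadget driver's completion callback
 * with the device lock dropped.
 */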
static void
done (struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280 *dev;
	unsigned stopped = ep->stopped;

	list_del_init (&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->mapped) {
		pci_unmap_single (dev->pdev, req->req.dma, req->req.length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	if (status && status != -ESHUTDOWN)
		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock (&dev->lock);
	req->req.complete (&ep->ep, &req->req);
	spin_lock (&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

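/* queue one request: on an idle endpoint this kicks off PIO or DMA right
 * away (ep0 status-only requests complete immediately); otherwise the
 * request is chained behind whatever is already queued.
 */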
static int
net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request *req;
	struct net2280_ep *ep;
	struct net2280 *dev;
	unsigned long flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	req = container_of (_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty (&req->queue))
		return -EINVAL;
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = pci_map_single (dev->pdev, _req->buf, _req->length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->mapped = 1;
	}

#if 0
	VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave (&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty (&ep->queue) && !ep->stopped) {
		/* use DMA if the endpoint supports it, else pio */
		if (ep->dma)
			start_dma (ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status (ep);
				done (ep, req, 0);
				VDEBUG (dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it. */
			if (ep->is_in)
				write_fifo (ep, _req);
			else if (list_empty (&ep->queue)) {
				u32 s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl (&ep->regs->ep_stat);
				if ((s & (1 << FIFO_EMPTY)) == 0) {
					/* note: _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo (ep, req)) {
						done (ep, req, 0);
						if (ep->num == 0)
							allow_status (ep);
						/* don't queue it */
						req = NULL;
					} else
						s = readl (&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & (1 << NAK_OUT_PACKETS)))
					writel ((1 << CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int valid = 1;

		if (ep->is_in) {
			int expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely (req->req.zero
				|| (req->req.length % ep->ep.maxpacket) != 0);
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma (ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail (&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore (&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static inline void
dma_done (
	struct net2280_ep *ep,
	struct net2280_request *req,
	u32 dmacount,
	int status
)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done (ep, req, status);
}

static void restart_dma (struct net2280_ep *ep);

static void scan_dma_completions (struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty (&ep->queue)) {
		struct net2280_request *req;
		u32 tmp;

		req = list_entry (ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb ();
		tmp = le32_to_cpup (&req->td->dmacount);
		if ((tmp & (1 << VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely (req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl (&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done (ep, req, tmp, 0);
			break;
		} else if (!ep->is_in
				&& (req->req.length % ep->ep.maxpacket) != 0) {
			tmp = readl (&ep->regs->ep_stat);

			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver. That helps avoid errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
				WARNING (ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
				/* fifo gets flushed later */
				ep->out_overflow = 1;
				DEBUG (ep->dev, "%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
				req->req.status = -EOVERFLOW;
			}
		}
		dma_done (ep, req, tmp, 0);
	}
}

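/* when the dma engine idles with requests still queued (e.g. after a
 * chaining "hiccup" or a short OUT transfer), re-validate whatever
 * descriptors can still be chained and restart the engine at the
 * queue head.
 */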
static void restart_dma (struct net2280_ep *ep)
{
	struct net2280_request *req;
	u32 dmactl = dmactl_default;

	if (ep->stopped)
		return;
	req = list_entry (ep->queue.next, struct net2280_request, queue);

	if (!use_dma_chaining) {
		start_dma (ep, req);
		return;
	}

	/* the 2280 will be processing the queue unless queue hiccups after
	 * the previous transfer:
	 *  IN: wanted automagic zlp, head doesn't (or vice versa)
	 *      DMA_FIFO_VALIDATE doesn't init from dma descriptors.
	 *  OUT: was "usb-short", we must restart.
	 */
	if (ep->is_in && !req->valid) {
		struct net2280_request *entry, *prev = NULL;
		int reqmode, done = 0;

		DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
		ep->in_fifo_validate = likely (req->req.zero
			|| (req->req.length % ep->ep.maxpacket) != 0);
		if (ep->in_fifo_validate)
			dmactl |= (1 << DMA_FIFO_VALIDATE);
		list_for_each_entry (entry, &ep->queue, queue) {
			__le32 dmacount;

			if (entry == req)
				continue;
			dmacount = entry->td->dmacount;
			if (!done) {
				reqmode = likely (entry->req.zero
					|| (entry->req.length
						% ep->ep.maxpacket) != 0);
				if (reqmode == ep->in_fifo_validate) {
					entry->valid = 1;
					dmacount |= valid_bit;
					entry->td->dmacount = dmacount;
					prev = entry;
					continue;
				} else {
					/* force a hiccup */
					prev->td->dmacount |= dma_done_ie;
					done = 1;
				}
			}

			/* walk the rest of the queue so unlinks behave */
			entry->valid = 0;
			dmacount &= ~valid_bit;
			entry->td->dmacount = dmacount;
			prev = entry;
		}
	}

	writel (0, &ep->dma->dmactl);
	start_queue (ep, dmactl, req->td_dma);
}

static void abort_dma (struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely (!list_empty (&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel ((1 << DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma (ep->dma);
	} else
		stop_dma (ep->dma);
	scan_dma_completions (ep);
}

/* dequeue ALL requests */
static void nuke (struct net2280_ep *ep)
{
	struct net2280_request *req;

	/* called with spinlock held */
	ep->stopped = 1;
	if (ep->dma)
		abort_dma (ep);
	while (!list_empty (&ep->queue)) {
		req = list_entry (ep->queue.next,
				struct net2280_request,
				queue);
		done (ep, req, -ESHUTDOWN);
	}
}

/* dequeue JUST ONE request */
static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep *ep;
	struct net2280_request *req;
	unsigned long flags;
	u32 dmactl;
	int stopped;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl (&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma (ep->dma);
		scan_dma_completions (ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore (&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma (ep);
			if (likely (ep->queue.next == &req->queue)) {
				// NOTE: misreports single-transfer mode
				req->td->dmacount = 0;	/* invalidate */
				dma_done (ep, req,
					readl (&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
			done (ep, req, -ECONNRESET);
		}
		req = NULL;

	/* patch up hardware chaining data */
	} else if (ep->dma && use_dma_chaining) {
		if (req->queue.prev == ep->queue.next) {
			writel (le32_to_cpu (req->td->dmadesc),
				&ep->dma->dmadesc);
			if (req->td->dmacount & dma_done_ie)
				writel (readl (&ep->dma->dmacount)
						| le32_to_cpu(dma_done_ie),
					&ep->dma->dmacount);
		} else {
			struct net2280_request *prev;

			prev = list_entry (req->queue.prev,
				struct net2280_request, queue);
			prev->td->dmadesc = req->td->dmadesc;
			if (req->td->dmacount & dma_done_ie)
				prev->td->dmacount |= dma_done_ie;
		}
	}

	if (req)
		done (ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty (&ep->queue))
			stop_dma (ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel (dmactl, &ep->dma->dmactl);
			else
				start_dma (ep, list_entry (ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore (&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int net2280_fifo_status (struct usb_ep *_ep);

static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep *ep;
	unsigned long flags;
	int retval = 0;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave (&ep->dev->lock, flags);
	if (!list_empty (&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
		retval = -EAGAIN;
	else {
		VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt (ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt (ep);
			ep->wedged = 0;
		}
		(void) readl (&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore (&ep->dev->lock, flags);

	return retval;
}

static int
net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}

static int
net2280_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2280_set_halt_and_wedge(_ep, 1, 1);
}

static int
net2280_fifo_status (struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	u32 avail;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	avail = readl (&ep->regs->ep_avail) & ((1 << 12) - 1);
	if (avail > ep->fifo_size)
		return -EOVERFLOW;
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}

static void
net2280_fifo_flush (struct usb_ep *_ep)
{
	struct net2280_ep *ep;

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	writel ((1 << FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl (&ep->regs->ep_rsp);
}

static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};

/*-------------------------------------------------------------------------*/

static int net2280_get_frame (struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;
	u16 retval;

	if (!_gadget)
		return -ENODEV;
	dev = container_of (_gadget, struct net2280, gadget);
	spin_lock_irqsave (&dev->lock, flags);
	retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
	spin_unlock_irqrestore (&dev->lock, flags);
	return retval;
}

static int net2280_wakeup (struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	tmp = readl (&dev->usb->usbctl);
	if (tmp & (1 << DEVICE_REMOTE_WAKEUP_ENABLE))
		writel (1 << GENERATE_RESUME, &dev->usb->usbstat);
	spin_unlock_irqrestore (&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	tmp = readl (&dev->usb->usbctl);
	if (value)
		tmp |= (1 << SELF_POWERED_STATUS);
	else
		tmp &= ~(1 << SELF_POWERED_STATUS);
	writel (tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore (&dev->lock, flags);

	return 0;
}

static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280 *dev;
	u32 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	tmp = readl (&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= (1 << USB_DETECT_ENABLE);
	else
		tmp &= ~(1 << USB_DETECT_ENABLE);
	writel (tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore (&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
};

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_GADGET_DEBUG_FILES

/* FIXME move these into procfs, and use seq_file.
 * Sysfs _still_ doesn't behave for arbitrarily sized files,
 * and also doesn't help products using this with 2.4 kernels.
 */

/* "function" sysfs attribute */
static ssize_t
show_function (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280 *dev = dev_get_drvdata (_dev);

	if (!dev->driver
			|| !dev->driver->function
			|| strlen (dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);

static ssize_t net2280_show_registers(struct device *_dev,
				struct device_attribute *attr, char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	int i;
	u32 t1, t2;
	const char *s;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf (next, size, "%s version " DRIVER_VERSION
			", chiprev %04x, dma %s\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			use_dma
				? (use_dma_chaining ? "chaining" : "enabled")
				: "disabled",
			readl (&dev->regs->devinit),
			readl (&dev->regs->fifoctl),
			s,
			readl (&dev->regs->pciirqenb0),
			readl (&dev->regs->pciirqenb1),
			readl (&dev->regs->irqstat0),
			readl (&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl (&dev->usb->usbctl);
	t2 = readl (&dev->usb->usbstat);
	if (t1 & (1 << VBUS_PIN)) {
		if (t2 & (1 << HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
		s = "not attached";
	t = scnprintf (next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
				"addr 0x%02x (%s)\n",
			readl (&dev->usb->stdrsp), t1, t2,
			readl (&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < 7; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep [i];
		if (i && !ep->desc)
			continue;

		t1 = readl (&ep->regs->ep_cfg);
		t2 = readl (&ep->regs->ep_rsp) & 0xff;
		t = scnprintf (next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & (1 << CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & (1 << CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & (1 << CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & (1<<CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & (1 << CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & (1 << CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & (1 << CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl (&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf (next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl (&ep->regs->ep_stat),
				readl (&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING (t1),
				type_string (t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf (next, size,
				" dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl (&ep->dma->dmactl),
				readl (&ep->dma->dmastat),
				readl (&ep->dma->dmacount),
				readl (&ep->dma->dmaaddr),
				readl (&ep->dma->dmadesc));
		size -= t;
		next += t;

	}

	/* Indexed Registers */
	// none yet

	/* Statistics */
	t = scnprintf (next, size, "\nirqs: ");
	size -= t;
	next += t;
	for (i = 0; i < 7; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep [i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf (next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore (&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(registers, S_IRUGO, net2280_show_registers, NULL);

static ssize_t
show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size;
	unsigned long flags;
	int i;

	dev = dev_get_drvdata (_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave (&dev->lock, flags);

	for (i = 0; i < 7; i++) {
		struct net2280_ep *ep = &dev->ep [i];
		struct net2280_request *req;
		int t;

		if (i != 0) {
			const struct usb_endpoint_descriptor *d;

			d = ep->desc;
			if (!d)
				continue;
			t = d->bEndpointAddress;
			t = scnprintf (next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				({ char *val;
				switch (d->bmAttributes & 0x03) {
				case USB_ENDPOINT_XFER_BULK:
					val = "bulk"; break;
				case USB_ENDPOINT_XFER_INT:
					val = "intr"; break;
				default:
					val = "iso"; break;
				}; val; }),
				le16_to_cpu (d->wMaxPacketSize) & 0x1fff,
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf (next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty (&ep->queue)) {
			t = scnprintf (next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry (req, &ep->queue, queue) {
			if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
				t = scnprintf (next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl (&ep->dma->dmacount));
			else
				t = scnprintf (next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma *td;

				td = req->td;
				t = scnprintf (next, size, "\t td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu (td->dmacount),
					le32_to_cpu (td->dmaaddr),
					le32_to_cpu (td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore (&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);


#else

#define device_create_file(a,b)	(0)
#define device_remove_file(a,b)	do { } while (0)

#endif

/*-------------------------------------------------------------------------*/

/* another driver-specific mode might be a request type doing dma
 * to/from another device fifo instead of to/from memory.
 */

static void set_fifo_mode (struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD (&dev->gadget.ep_list);
	list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
		break;
	case 1:
		dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep [1].fifo_size = 2048;
		dev->ep [2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
}

/* just declare this in any driver that really needs it */
extern int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode);
1742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743/**
1744 * net2280_set_fifo_mode - change allocation of fifo buffers
1745 * @gadget: access to the net2280 device that will be updated
1746 * @mode: 0 for default, four 1kB buffers (ep-a through ep-d);
David Brownell901b3d72006-09-02 03:13:45 -07001747 * 1 for two 2kB buffers (ep-a and ep-b only);
1748 * 2 for one 2kB buffer (ep-a) and two 1kB ones (ep-b, ep-c).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 *
1750 * returns zero on success, else negative errno. when this succeeds,
1751 * the contents of gadget->ep_list may have changed.
1752 *
1753 * you may only call this function when endpoints a-d are all disabled.
1754 * use it whenever extra hardware buffering can help performance, such
1755 * as before enabling "high bandwidth" interrupt endpoints that use
1756 * maxpacket bigger than 512 (when double buffering would otherwise
1757 * be unavailable).
1758 */
1759int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
1760{
1761 int i;
1762 struct net2280 *dev;
1763 int status = 0;
1764 unsigned long flags;
1765
1766 if (!gadget)
1767 return -ENODEV;
1768 dev = container_of (gadget, struct net2280, gadget);
1769
1770 spin_lock_irqsave (&dev->lock, flags);
1771
1772 for (i = 1; i <= 4; i++)
1773 if (dev->ep [i].desc) {
1774 status = -EINVAL;
1775 break;
1776 }
1777 if (mode < 0 || mode > 2)
1778 status = -EINVAL;
1779 if (status == 0)
1780 set_fifo_mode (dev, mode);
1781 spin_unlock_irqrestore (&dev->lock, flags);
1782
1783 if (status == 0) {
1784 if (mode == 1)
1785 DEBUG (dev, "fifo: ep-a 2K, ep-b 2K\n");
1786 else if (mode == 2)
1787 DEBUG (dev, "fifo: ep-a 2K, ep-b 1K, ep-c 1K\n");
1788 /* else all are 1K */
1789 }
1790 return status;
1791}
1792EXPORT_SYMBOL (net2280_set_fifo_mode);
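/* Illustrative sketch only, not part of this driver: a gadget driver
 * wanting the deeper ep-a/ep-b FIFOs might call the exported helper
 * from its bind() callback, before any of ep-a..ep-d are enabled.
 * The my_bind() name and the choice of mode 1 are hypothetical.
 */
#if 0
static int my_bind (struct usb_gadget *gadget)
{
	int status;

	/* mode 1: two 2kB FIFOs; only ep-a and ep-b stay usable */
	status = net2280_set_fifo_mode (gadget, 1);
	if (status < 0)
		return status;	/* e.g. an endpoint was still enabled */

	/* ... usual bind() work: allocate requests, etc ... */
	return 0;
}
#endif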
1793
1794/*-------------------------------------------------------------------------*/
1795
1796/* keeping it simple:
1797 * - one bus driver, initted first;
1798 * - one function driver, initted second
1799 *
1800 * most of the work to support multiple net2280 controllers would
1801 * be to associate this gadget driver (yes?) with all of them, or
1802 * perhaps to bind specific drivers to specific devices.
1803 */
1804
1805static struct net2280 *the_controller;
1806
1807static void usb_reset (struct net2280 *dev)
1808{
1809 u32 tmp;
1810
1811 dev->gadget.speed = USB_SPEED_UNKNOWN;
1812 (void) readl (&dev->usb->usbctl);
1813
1814 net2280_led_init (dev);
1815
1816 /* disable automatic responses, and irqs */
1817 writel (0, &dev->usb->stdrsp);
1818 writel (0, &dev->regs->pciirqenb0);
1819 writel (0, &dev->regs->pciirqenb1);
1820
1821 /* clear old dma and irq state */
1822 for (tmp = 0; tmp < 4; tmp++) {
1823 struct net2280_ep *ep = &dev->ep [tmp + 1];
1824
1825 if (ep->dma)
1826 abort_dma (ep);
1827 }
1828	writel (~0, &dev->regs->irqstat0);
1829	writel (~(1 << SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
1830
1831 /* reset, and enable pci */
1832 tmp = readl (&dev->regs->devinit)
1833 | (1 << PCI_ENABLE)
1834 | (1 << FIFO_SOFT_RESET)
1835 | (1 << USB_SOFT_RESET)
1836 | (1 << M8051_RESET);
1837 writel (tmp, &dev->regs->devinit);
1838
1839 /* standard fifo and endpoint allocations */
1840 set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
1841}
1842
1843static void usb_reinit (struct net2280 *dev)
1844{
1845 u32 tmp;
1846 int init_dma;
1847
1848 /* use_dma changes are ignored till next device re-init */
1849 init_dma = use_dma;
1850
1851 /* basic endpoint init */
1852 for (tmp = 0; tmp < 7; tmp++) {
1853 struct net2280_ep *ep = &dev->ep [tmp];
1854
1855 ep->ep.name = ep_name [tmp];
1856 ep->dev = dev;
1857 ep->num = tmp;
1858
1859 if (tmp > 0 && tmp <= 4) {
1860 ep->fifo_size = 1024;
1861 if (init_dma)
1862 ep->dma = &dev->dma [tmp - 1];
1863 } else
1864 ep->fifo_size = 64;
1865 ep->regs = &dev->epregs [tmp];
1866 ep_reset (dev->regs, ep);
1867 }
1868 dev->ep [0].ep.maxpacket = 64;
1869 dev->ep [5].ep.maxpacket = 64;
1870 dev->ep [6].ep.maxpacket = 64;
1871
1872 dev->gadget.ep0 = &dev->ep [0].ep;
1873 dev->ep [0].stopped = 0;
1874 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1875
1876 /* we want to prevent lowlevel/insecure access from the USB host,
1877 * but erratum 0119 means this enable bit is ignored
1878 */
1879 for (tmp = 0; tmp < 5; tmp++)
1880 writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
1881}
1882
1883static void ep0_start (struct net2280 *dev)
1884{
1885 writel ( (1 << CLEAR_EP_HIDE_STATUS_PHASE)
1886 | (1 << CLEAR_NAK_OUT_PACKETS)
1887 | (1 << CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
1888 , &dev->epregs [0].ep_rsp);
1889
1890 /*
1891 * hardware optionally handles a bunch of standard requests
1892 * that the API hides from drivers anyway. have it do so.
1893 * endpoint status/features are handled in software, to
1894 * help pass tests for some dubious behavior.
1895 */
1896 writel ( (1 << SET_TEST_MODE)
1897 | (1 << SET_ADDRESS)
1898 | (1 << DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP)
1899 | (1 << GET_DEVICE_STATUS)
1900 | (1 << GET_INTERFACE_STATUS)
1901 , &dev->usb->stdrsp);
1902 writel ( (1 << USB_ROOT_PORT_WAKEUP_ENABLE)
1903 | (1 << SELF_POWERED_USB_DEVICE)
1904 | (1 << REMOTE_WAKEUP_SUPPORT)
1905 | (dev->softconnect << USB_DETECT_ENABLE)
1906 | (1 << SELF_POWERED_STATUS)
1907 , &dev->usb->usbctl);
1908
1909 /* enable irqs so we can see ep0 and general operation */
1910 writel ( (1 << SETUP_PACKET_INTERRUPT_ENABLE)
1911 | (1 << ENDPOINT_0_INTERRUPT_ENABLE)
1912 , &dev->regs->pciirqenb0);
1913 writel ( (1 << PCI_INTERRUPT_ENABLE)
1914 | (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE)
1915 | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE)
1916 | (1 << PCI_RETRY_ABORT_INTERRUPT_ENABLE)
1917 | (1 << VBUS_INTERRUPT_ENABLE)
1918 | (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE)
1919 | (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)
1920 , &dev->regs->pciirqenb1);
1921
1922 /* don't leave any writes posted */
1923 (void) readl (&dev->usb->usbctl);
1924}
1925
1926/* when a driver is successfully registered, it will receive
1927 * control requests including set_configuration(), which enables
1928 * non-control requests. then usb traffic follows until a
1929 * disconnect is reported. then a host may connect again, or
1930 * the driver might get unbound.
1931 */
1932int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1933{
1934 struct net2280 *dev = the_controller;
1935 int retval;
1936 unsigned i;
1937
1938 /* insist on high speed support from the driver, since
1939 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
1940 * "must not be used in normal operation"
1941 */
1942 if (!driver
1943 || driver->speed != USB_SPEED_HIGH
1944 || !driver->bind
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 || !driver->setup)
1946 return -EINVAL;
1947 if (!dev)
1948 return -ENODEV;
1949 if (dev->driver)
1950 return -EBUSY;
1951
1952 for (i = 0; i < 7; i++)
1953 dev->ep [i].irqs = 0;
1954
1955 /* hook up the driver ... */
1956 dev->softconnect = 1;
1957 driver->driver.bus = NULL;
1958 dev->driver = driver;
1959 dev->gadget.dev.driver = &driver->driver;
1960 retval = driver->bind (&dev->gadget);
1961 if (retval) {
1962 DEBUG (dev, "bind to driver %s --> %d\n",
1963 driver->driver.name, retval);
1964 dev->driver = NULL;
1965 dev->gadget.dev.driver = NULL;
1966 return retval;
1967 }
1968
Jeff Garzikb3899da2006-10-11 21:50:24 -04001969 retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
1970 if (retval) goto err_unbind;
1971 retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
1972 if (retval) goto err_func;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
1974 /* ... then enable host detection and ep0; and we're ready
1975 * for set_configuration as well as eventual disconnect.
1976 */
1977 net2280_led_active (dev, 1);
1978 ep0_start (dev);
1979
1980 DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
1981 driver->driver.name,
1982 readl (&dev->usb->usbctl),
1983 readl (&dev->usb->stdrsp));
1984
1985 /* pci writes may still be posted */
1986 return 0;
Jeff Garzikb3899da2006-10-11 21:50:24 -04001987
1988err_func:
1989 device_remove_file (&dev->pdev->dev, &dev_attr_function);
1990err_unbind:
1991 driver->unbind (&dev->gadget);
1992 dev->gadget.dev.driver = NULL;
1993 dev->driver = NULL;
1994 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995}
1996EXPORT_SYMBOL (usb_gadget_register_driver);
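/* Illustrative sketch only: the minimum a gadget driver must provide
 * to satisfy the checks above -- high speed support plus bind() and
 * setup() -- and an unbind(), which usb_gadget_unregister_driver()
 * below insists on.  All "my_*" names are hypothetical.
 */
#if 0
static struct usb_gadget_driver my_gadget_driver = {
	.function	= "my function",
	.speed		= USB_SPEED_HIGH,
	.bind		= my_bind,
	.unbind		= my_unbind,
	.setup		= my_setup,
	.disconnect	= my_disconnect,
	.driver		= {
		.name	= "my_gadget",
		.owner	= THIS_MODULE,
	},
};

static int __init my_init (void)
{
	return usb_gadget_register_driver (&my_gadget_driver);
}

static void __exit my_exit (void)
{
	usb_gadget_unregister_driver (&my_gadget_driver);
}
#endif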
1997
1998static void
1999stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
2000{
2001 int i;
2002
2003 /* don't disconnect if it's not connected */
2004 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2005 driver = NULL;
2006
2007 /* stop hardware; prevent new request submissions;
2008 * and kill any outstanding requests.
2009 */
2010 usb_reset (dev);
2011 for (i = 0; i < 7; i++)
2012 nuke (&dev->ep [i]);
2013
2014 /* report disconnect; the driver is already quiesced */
2015 if (driver) {
2016 spin_unlock (&dev->lock);
2017 driver->disconnect (&dev->gadget);
2018 spin_lock (&dev->lock);
2019 }
2020
2021 usb_reinit (dev);
2022}
2023
2024int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
2025{
2026 struct net2280 *dev = the_controller;
2027 unsigned long flags;
2028
2029 if (!dev)
2030 return -ENODEV;
David Brownell6bea4762006-12-05 03:15:33 -08002031 if (!driver || driver != dev->driver || !driver->unbind)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 return -EINVAL;
2033
2034 spin_lock_irqsave (&dev->lock, flags);
2035 stop_activity (dev, driver);
2036 spin_unlock_irqrestore (&dev->lock, flags);
2037
2038 net2280_pullup (&dev->gadget, 0);
2039
2040 driver->unbind (&dev->gadget);
2041 dev->gadget.dev.driver = NULL;
2042 dev->driver = NULL;
2043
2044 net2280_led_active (dev, 0);
2045 device_remove_file (&dev->pdev->dev, &dev_attr_function);
2046 device_remove_file (&dev->pdev->dev, &dev_attr_queues);
2047
2048 DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
2049 return 0;
2050}
2051EXPORT_SYMBOL (usb_gadget_unregister_driver);
2052
2053
2054/*-------------------------------------------------------------------------*/
2055
2056/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2057 * also works for dma-capable endpoints, in pio mode or just
2058 * to manually advance the queue after short OUT transfers.
2059 */
2060static void handle_ep_small (struct net2280_ep *ep)
2061{
2062 struct net2280_request *req;
2063 u32 t;
2064 /* 0 error, 1 mid-data, 2 done */
2065 int mode = 1;
2066
2067 if (!list_empty (&ep->queue))
2068 req = list_entry (ep->queue.next,
2069 struct net2280_request, queue);
2070 else
2071 req = NULL;
2072
2073 /* ack all, and handle what we care about */
2074 t = readl (&ep->regs->ep_stat);
2075 ep->irqs++;
2076#if 0
2077 VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
2078 ep->ep.name, t, req ? &req->req : 0);
2079#endif
Guennadi Liakhovetski950ee4c2006-03-19 20:49:14 +01002080 if (!ep->is_in || ep->dev->pdev->device == 0x2280)
2081 writel (t & ~(1 << NAK_OUT_PACKETS), &ep->regs->ep_stat);
2082 else
2083 /* Added for 2282 */
2084 writel (t, &ep->regs->ep_stat);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
2086 /* for ep0, monitor token irqs to catch data stage length errors
2087 * and to synchronize on status.
2088 *
2089 * also, to defer reporting of protocol stalls ... here's where
2090 * data or status first appears, handling stalls here should never
2091 * cause trouble on the host side..
2092	 * cause trouble on the host side.
2093 * control requests could be slightly faster without token synch for
2094 * status, but status can jam up that way.
2095 */
2096 if (unlikely (ep->num == 0)) {
2097 if (ep->is_in) {
2098 /* status; stop NAKing */
2099 if (t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT)) {
2100 if (ep->dev->protocol_stall) {
2101 ep->stopped = 1;
2102 set_halt (ep);
2103 }
2104 if (!req)
2105 allow_status (ep);
2106 mode = 2;
2107 /* reply to extra IN data tokens with a zlp */
2108 } else if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
2109 if (ep->dev->protocol_stall) {
2110 ep->stopped = 1;
2111 set_halt (ep);
2112 mode = 2;
Alan Stern1f26e282006-11-16 10:16:00 -05002113 } else if (ep->responded &&
2114 !req && !ep->stopped)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 write_fifo (ep, NULL);
2116 }
2117 } else {
2118 /* status; stop NAKing */
2119 if (t & (1 << DATA_IN_TOKEN_INTERRUPT)) {
2120 if (ep->dev->protocol_stall) {
2121 ep->stopped = 1;
2122 set_halt (ep);
2123 }
2124 mode = 2;
2125 /* an extra OUT token is an error */
2126 } else if (((t & (1 << DATA_OUT_PING_TOKEN_INTERRUPT))
2127 && req
2128 && req->req.actual == req->req.length)
Alan Stern1f26e282006-11-16 10:16:00 -05002129 || (ep->responded && !req)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 ep->dev->protocol_stall = 1;
2131 set_halt (ep);
2132 ep->stopped = 1;
2133 if (req)
2134 done (ep, req, -EOVERFLOW);
2135 req = NULL;
2136 }
2137 }
2138 }
2139
2140 if (unlikely (!req))
2141 return;
2142
2143 /* manual DMA queue advance after short OUT */
2144 if (likely (ep->dma != 0)) {
2145 if (t & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
2146 u32 count;
2147 int stopped = ep->stopped;
2148
2149 /* TRANSFERRED works around OUT_DONE erratum 0112.
2150 * we expect (N <= maxpacket) bytes; host wrote M.
2151 * iff (M < N) we won't ever see a DMA interrupt.
2152 */
2153 ep->stopped = 1;
2154 for (count = 0; ; t = readl (&ep->regs->ep_stat)) {
2155
2156 /* any preceding dma transfers must finish.
2157 * dma handles (M >= N), may empty the queue
2158 */
2159 scan_dma_completions (ep);
2160 if (unlikely (list_empty (&ep->queue)
2161 || ep->out_overflow)) {
2162 req = NULL;
2163 break;
2164 }
2165 req = list_entry (ep->queue.next,
2166 struct net2280_request, queue);
2167
2168 /* here either (M < N), a "real" short rx;
2169 * or (M == N) and the queue didn't empty
2170 */
2171 if (likely (t & (1 << FIFO_EMPTY))) {
2172 count = readl (&ep->dma->dmacount);
2173 count &= DMA_BYTE_COUNT_MASK;
2174 if (readl (&ep->dma->dmadesc)
2175 != req->td_dma)
2176 req = NULL;
2177 break;
2178 }
2179 udelay(1);
2180 }
2181
2182 /* stop DMA, leave ep NAKing */
2183 writel ((1 << DMA_ABORT), &ep->dma->dmastat);
2184 spin_stop_dma (ep->dma);
2185
2186 if (likely (req)) {
2187 req->td->dmacount = 0;
2188 t = readl (&ep->regs->ep_avail);
David Brownell68dcc682006-04-02 10:18:53 -08002189 dma_done (ep, req, count,
David Brownell901b3d72006-09-02 03:13:45 -07002190 (ep->out_overflow || t)
2191 ? -EOVERFLOW : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 }
2193
2194 /* also flush to prevent erratum 0106 trouble */
2195 if (unlikely (ep->out_overflow
2196 || (ep->dev->chiprev == 0x0100
2197 && ep->dev->gadget.speed
2198 == USB_SPEED_FULL))) {
2199 out_flush (ep);
2200 ep->out_overflow = 0;
2201 }
2202
2203 /* (re)start dma if needed, stop NAKing */
2204 ep->stopped = stopped;
2205 if (!list_empty (&ep->queue))
2206 restart_dma (ep);
2207 } else
2208 DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
2209 ep->ep.name, t);
2210 return;
2211
2212 /* data packet(s) received (in the fifo, OUT) */
2213 } else if (t & (1 << DATA_PACKET_RECEIVED_INTERRUPT)) {
2214 if (read_fifo (ep, req) && ep->num != 0)
2215 mode = 2;
2216
2217 /* data packet(s) transmitted (IN) */
2218 } else if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) {
2219 unsigned len;
2220
2221 len = req->req.length - req->req.actual;
2222 if (len > ep->ep.maxpacket)
2223 len = ep->ep.maxpacket;
2224 req->req.actual += len;
2225
2226 /* if we wrote it all, we're usually done */
2227 if (req->req.actual == req->req.length) {
2228 if (ep->num == 0) {
Alan Stern317e83b2006-04-14 16:42:03 -04002229 /* send zlps until the status stage */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230 } else if (!req->req.zero || len != ep->ep.maxpacket)
2231 mode = 2;
2232 }
2233
2234 /* there was nothing to do ... */
2235 } else if (mode == 1)
2236 return;
2237
2238 /* done */
2239 if (mode == 2) {
2240 /* stream endpoints often resubmit/unlink in completion */
2241 done (ep, req, 0);
2242
2243 /* maybe advance queue to next request */
2244 if (ep->num == 0) {
2245 /* NOTE: net2280 could let gadget driver start the
2246 * status stage later. since not all controllers let
2247 * them control that, the api doesn't (yet) allow it.
2248 */
2249 if (!ep->stopped)
2250 allow_status (ep);
2251 req = NULL;
2252 } else {
2253 if (!list_empty (&ep->queue) && !ep->stopped)
2254 req = list_entry (ep->queue.next,
2255 struct net2280_request, queue);
2256 else
2257 req = NULL;
2258 if (req && !ep->is_in)
2259 stop_out_naking (ep);
2260 }
2261 }
2262
2263 /* is there a buffer for the next packet?
2264 * for best streaming performance, make sure there is one.
2265 */
2266 if (req && !ep->stopped) {
2267
2268 /* load IN fifo with next packet (may be zlp) */
2269 if (t & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
2270 write_fifo (ep, &req->req);
2271 }
2272}
2273
2274static struct net2280_ep *
2275get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2276{
2277 struct net2280_ep *ep;
2278
2279 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2280 return &dev->ep [0];
2281 list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
2282 u8 bEndpointAddress;
2283
2284 if (!ep->desc)
2285 continue;
2286 bEndpointAddress = ep->desc->bEndpointAddress;
2287 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2288 continue;
2289 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2290 return ep;
2291 }
2292 return NULL;
2293}
2294
2295static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
2296{
2297 struct net2280_ep *ep;
2298 u32 num, scratch;
2299
2300 /* most of these don't need individual acks */
2301 stat &= ~(1 << INTA_ASSERTED);
2302 if (!stat)
2303 return;
2304 // DEBUG (dev, "irqstat0 %04x\n", stat);
2305
2306 /* starting a control request? */
2307 if (unlikely (stat & (1 << SETUP_PACKET_INTERRUPT))) {
2308 union {
2309 u32 raw [2];
2310 struct usb_ctrlrequest r;
2311 } u;
Guennadi Liakhovetski950ee4c2006-03-19 20:49:14 +01002312 int tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 struct net2280_request *req;
2314
2315 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
2316 if (readl (&dev->usb->usbstat) & (1 << HIGH_SPEED))
2317 dev->gadget.speed = USB_SPEED_HIGH;
2318 else
2319 dev->gadget.speed = USB_SPEED_FULL;
2320 net2280_led_speed (dev, dev->gadget.speed);
2321 DEBUG (dev, "%s speed\n",
2322 (dev->gadget.speed == USB_SPEED_HIGH)
2323 ? "high" : "full");
2324 }
2325
2326 ep = &dev->ep [0];
2327 ep->irqs++;
2328
2329 /* make sure any leftover request state is cleared */
2330 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
2331 while (!list_empty (&ep->queue)) {
2332 req = list_entry (ep->queue.next,
2333 struct net2280_request, queue);
2334 done (ep, req, (req->req.actual == req->req.length)
2335 ? 0 : -EPROTO);
2336 }
2337 ep->stopped = 0;
2338 dev->protocol_stall = 0;
Guennadi Liakhovetski950ee4c2006-03-19 20:49:14 +01002339
2340 if (ep->dev->pdev->device == 0x2280)
2341 tmp = (1 << FIFO_OVERFLOW)
2342 | (1 << FIFO_UNDERFLOW);
2343 else
2344 tmp = 0;
2345
2346 writel (tmp | (1 << TIMEOUT)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 | (1 << USB_STALL_SENT)
2348 | (1 << USB_IN_NAK_SENT)
2349 | (1 << USB_IN_ACK_RCVD)
2350 | (1 << USB_OUT_PING_NAK_SENT)
2351 | (1 << USB_OUT_ACK_SENT)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 | (1 << SHORT_PACKET_OUT_DONE_INTERRUPT)
2353 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)
2354 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
2355 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
2356 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2357 | (1 << DATA_IN_TOKEN_INTERRUPT)
2358 , &ep->regs->ep_stat);
2359 u.raw [0] = readl (&dev->usb->setup0123);
2360 u.raw [1] = readl (&dev->usb->setup4567);
David Brownell901b3d72006-09-02 03:13:45 -07002361
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 cpu_to_le32s (&u.raw [0]);
2363 cpu_to_le32s (&u.raw [1]);
2364
Guennadi Liakhovetski950ee4c2006-03-19 20:49:14 +01002365 tmp = 0;
2366
David Brownell01ee7d72007-05-25 20:40:14 -07002367#define w_value le16_to_cpu(u.r.wValue)
2368#define w_index le16_to_cpu(u.r.wIndex)
2369#define w_length le16_to_cpu(u.r.wLength)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370
2371 /* ack the irq */
2372 writel (1 << SETUP_PACKET_INTERRUPT, &dev->regs->irqstat0);
2373 stat ^= (1 << SETUP_PACKET_INTERRUPT);
2374
2375 /* watch control traffic at the token level, and force
2376 * synchronization before letting the status stage happen.
2377 * FIXME ignore tokens we'll NAK, until driver responds.
2378 * that'll mean a lot less irqs for some drivers.
2379 */
2380 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
2381 if (ep->is_in) {
2382 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
2383 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2384 | (1 << DATA_IN_TOKEN_INTERRUPT);
2385 stop_out_naking (ep);
2386 } else
2387 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT)
2388 | (1 << DATA_OUT_PING_TOKEN_INTERRUPT)
2389 | (1 << DATA_IN_TOKEN_INTERRUPT);
2390 writel (scratch, &dev->epregs [0].ep_irqenb);
2391
2392 /* we made the hardware handle most lowlevel requests;
2393 * everything else goes uplevel to the gadget code.
2394 */
Alan Stern1f26e282006-11-16 10:16:00 -05002395 ep->responded = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 switch (u.r.bRequest) {
2397 case USB_REQ_GET_STATUS: {
2398 struct net2280_ep *e;
David Brownell320f3452005-05-07 13:05:18 -07002399 __le32 status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
2401 /* hw handles device and interface status */
2402 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
2403 goto delegate;
David Brownell320f3452005-05-07 13:05:18 -07002404 if ((e = get_ep_by_addr (dev, w_index)) == 0
2405 || w_length > 2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 goto do_stall;
2407
2408 if (readl (&e->regs->ep_rsp)
2409 & (1 << SET_ENDPOINT_HALT))
Harvey Harrison551509d2009-02-11 14:11:36 -08002410 status = cpu_to_le32 (1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411 else
Harvey Harrison551509d2009-02-11 14:11:36 -08002412 status = cpu_to_le32 (0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413
2414 /* don't bother with a request object! */
2415 writel (0, &dev->epregs [0].ep_irqenb);
David Brownell320f3452005-05-07 13:05:18 -07002416 set_fifo_bytecount (ep, w_length);
2417 writel ((__force u32)status, &dev->epregs [0].ep_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 allow_status (ep);
2419 VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
2420 goto next_endpoints;
2421 }
2422 break;
2423 case USB_REQ_CLEAR_FEATURE: {
2424 struct net2280_ep *e;
2425
2426 /* hw handles device features */
2427 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2428 goto delegate;
David Brownell320f3452005-05-07 13:05:18 -07002429 if (w_value != USB_ENDPOINT_HALT
2430 || w_length != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 goto do_stall;
David Brownell320f3452005-05-07 13:05:18 -07002432 if ((e = get_ep_by_addr (dev, w_index)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 goto do_stall;
Alan Stern80661342008-08-14 15:49:11 -04002434 if (e->wedged) {
2435 VDEBUG(dev, "%s wedged, halt not cleared\n",
2436 ep->ep.name);
2437 } else {
2438 VDEBUG(dev, "%s clear halt\n", ep->ep.name);
2439 clear_halt(e);
2440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 allow_status (ep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 goto next_endpoints;
2443 }
2444 break;
2445 case USB_REQ_SET_FEATURE: {
2446 struct net2280_ep *e;
2447
2448 /* hw handles device features */
2449 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2450 goto delegate;
David Brownell320f3452005-05-07 13:05:18 -07002451 if (w_value != USB_ENDPOINT_HALT
2452 || w_length != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 goto do_stall;
David Brownell320f3452005-05-07 13:05:18 -07002454 if ((e = get_ep_by_addr (dev, w_index)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 goto do_stall;
Alan Stern80661342008-08-14 15:49:11 -04002456 if (e->ep.name == ep0name)
2457 goto do_stall;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 set_halt (e);
2459 allow_status (ep);
2460 VDEBUG (dev, "%s set halt\n", ep->ep.name);
2461 goto next_endpoints;
2462 }
2463 break;
2464 default:
2465delegate:
Joe Perchesfec8de32007-11-19 17:53:33 -08002466 VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x "
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 "ep_cfg %08x\n",
2468 u.r.bRequestType, u.r.bRequest,
David Brownell320f3452005-05-07 13:05:18 -07002469 w_value, w_index, w_length,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 readl (&ep->regs->ep_cfg));
Alan Stern1f26e282006-11-16 10:16:00 -05002471 ep->responded = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 spin_unlock (&dev->lock);
2473 tmp = dev->driver->setup (&dev->gadget, &u.r);
2474 spin_lock (&dev->lock);
2475 }
2476
2477 /* stall ep0 on error */
2478 if (tmp < 0) {
2479do_stall:
2480 VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
2481 u.r.bRequestType, u.r.bRequest, tmp);
2482 dev->protocol_stall = 1;
2483 }
2484
2485 /* some in/out token irq should follow; maybe stall then.
2486 * driver must queue a request (even zlp) or halt ep0
2487 * before the host times out.
2488 */
2489 }
2490
David Brownell320f3452005-05-07 13:05:18 -07002491#undef w_value
2492#undef w_index
2493#undef w_length
2494
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495next_endpoints:
2496 /* endpoint data irq ? */
2497 scratch = stat & 0x7f;
2498 stat &= ~0x7f;
2499 for (num = 0; scratch; num++) {
2500 u32 t;
2501
2502 /* do this endpoint's FIFO and queue need tending? */
2503 t = 1 << num;
2504 if ((scratch & t) == 0)
2505 continue;
2506 scratch ^= t;
2507
2508 ep = &dev->ep [num];
2509 handle_ep_small (ep);
2510 }
2511
2512 if (stat)
2513 DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
2514}
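/* Illustrative sketch only: what a gadget driver's setup() callback
 * (reached through "delegate:" above) typically does with a request
 * neither the hardware nor this driver handles -- answer an IN data
 * stage by queueing a preallocated ep0 request, or return a negative
 * value to trigger the protocol stall path above.  MY_VENDOR_REQUEST,
 * my_ep0_req, and my_data are hypothetical.
 */
#if 0
static int my_setup (struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_request	*req = my_ep0_req;	/* allocated in bind() */
	u16			len = le16_to_cpu (ctrl->wLength);
	int			value = -EOPNOTSUPP;

	switch (ctrl->bRequest) {
	case MY_VENDOR_REQUEST:			/* device-to-host data */
		if (len > sizeof my_data)
			len = sizeof my_data;
		memcpy (req->buf, my_data, len);
		value = len;
		break;
	/* set_configuration() and friends also arrive here */
	}

	if (value >= 0) {
		req->length = value;
		req->zero = value < le16_to_cpu (ctrl->wLength);
		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
	}
	/* a negative return makes handle_stat0_irqs() stall ep0 */
	return value;
}
#endif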
2515
2516#define DMA_INTERRUPTS ( \
2517 (1 << DMA_D_INTERRUPT) \
2518 | (1 << DMA_C_INTERRUPT) \
2519 | (1 << DMA_B_INTERRUPT) \
2520 | (1 << DMA_A_INTERRUPT))
2521#define PCI_ERROR_INTERRUPTS ( \
2522 (1 << PCI_MASTER_ABORT_RECEIVED_INTERRUPT) \
2523 | (1 << PCI_TARGET_ABORT_RECEIVED_INTERRUPT) \
2524 | (1 << PCI_RETRY_ABORT_INTERRUPT))
2525
2526static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
2527{
2528 struct net2280_ep *ep;
2529 u32 tmp, num, mask, scratch;
2530
2531 /* after disconnect there's nothing else to do! */
2532 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
2533 mask = (1 << HIGH_SPEED) | (1 << FULL_SPEED);
2534
2535 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
2536	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
David Brownell901b3d72006-09-02 03:13:45 -07002537 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 * only indicates a change in the reset state).
2539 */
2540 if (stat & tmp) {
2541 writel (tmp, &dev->regs->irqstat1);
David Brownell901b3d72006-09-02 03:13:45 -07002542 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT))
2543 && ((readl (&dev->usb->usbstat) & mask)
2544 == 0))
2545 || ((readl (&dev->usb->usbctl)
2546 & (1 << VBUS_PIN)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
2548 DEBUG (dev, "disconnect %s\n",
2549 dev->driver->driver.name);
2550 stop_activity (dev, dev->driver);
2551 ep0_start (dev);
2552 return;
2553 }
2554 stat &= ~tmp;
2555
2556 /* vBUS can bounce ... one of many reasons to ignore the
2557 * notion of hotplug events on bus connect/disconnect!
2558 */
2559 if (!stat)
2560 return;
2561 }
2562
2563 /* NOTE: chip stays in PCI D0 state for now, but it could
2564 * enter D1 to save more power
2565 */
2566 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2567 if (stat & tmp) {
2568 writel (tmp, &dev->regs->irqstat1);
2569 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2570 if (dev->driver->suspend)
2571 dev->driver->suspend (&dev->gadget);
2572 if (!enable_suspend)
2573 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2574 } else {
2575 if (dev->driver->resume)
2576 dev->driver->resume (&dev->gadget);
2577 /* at high speed, note erratum 0133 */
2578 }
2579 stat &= ~tmp;
2580 }
2581
2582 /* clear any other status/irqs */
2583 if (stat)
2584 writel (stat, &dev->regs->irqstat1);
2585
2586 /* some status we can just ignore */
Guennadi Liakhovetski950ee4c2006-03-19 20:49:14 +01002587 if (dev->pdev->device == 0x2280)
2588 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2589 | (1 << SUSPEND_REQUEST_INTERRUPT)
2590 | (1 << RESUME_INTERRUPT)
2591 | (1 << SOF_INTERRUPT));
2592 else
2593 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2594 | (1 << RESUME_INTERRUPT)
2595 | (1 << SOF_DOWN_INTERRUPT)
2596 | (1 << SOF_INTERRUPT));
2597
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 if (!stat)
2599 return;
2600 // DEBUG (dev, "irqstat1 %08x\n", stat);
2601
2602 /* DMA status, for ep-{a,b,c,d} */
2603 scratch = stat & DMA_INTERRUPTS;
2604 stat &= ~DMA_INTERRUPTS;
2605 scratch >>= 9;
2606 for (num = 0; scratch; num++) {
2607 struct net2280_dma_regs __iomem *dma;
2608
2609 tmp = 1 << num;
2610 if ((tmp & scratch) == 0)
2611 continue;
2612 scratch ^= tmp;
2613
2614 ep = &dev->ep [num + 1];
2615 dma = ep->dma;
2616
2617 if (!dma)
2618 continue;
2619
2620 /* clear ep's dma status */
2621 tmp = readl (&dma->dmastat);
2622 writel (tmp, &dma->dmastat);
2623
2624 /* chaining should stop on abort, short OUT from fifo,
2625 * or (stat0 codepath) short OUT transfer.
2626 */
2627 if (!use_dma_chaining) {
2628 if ((tmp & (1 << DMA_TRANSACTION_DONE_INTERRUPT))
2629 == 0) {
2630 DEBUG (ep->dev, "%s no xact done? %08x\n",
2631 ep->ep.name, tmp);
2632 continue;
2633 }
2634 stop_dma (ep->dma);
2635 }
2636
2637 /* OUT transfers terminate when the data from the
2638 * host is in our memory. Process whatever's done.
2639 * On this path, we know transfer's last packet wasn't
2640 * less than req->length. NAK_OUT_PACKETS may be set,
2641 * or the FIFO may already be holding new packets.
2642 *
2643 * IN transfers can linger in the FIFO for a very
2644 * long time ... we ignore that for now, accounting
2645 * precisely (like PIO does) needs per-packet irqs
2646 */
2647 scan_dma_completions (ep);
2648
2649 /* disable dma on inactive queues; else maybe restart */
2650 if (list_empty (&ep->queue)) {
2651 if (use_dma_chaining)
2652 stop_dma (ep->dma);
2653 } else {
2654 tmp = readl (&dma->dmactl);
2655 if (!use_dma_chaining
2656 || (tmp & (1 << DMA_ENABLE)) == 0)
2657 restart_dma (ep);
2658 else if (ep->is_in && use_dma_chaining) {
2659 struct net2280_request *req;
David Brownell320f3452005-05-07 13:05:18 -07002660 __le32 dmacount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661
2662 /* the descriptor at the head of the chain
2663 * may still have VALID_BIT clear; that's
2664 * used to trigger changing DMA_FIFO_VALIDATE
2665 * (affects automagic zlp writes).
2666 */
2667 req = list_entry (ep->queue.next,
2668 struct net2280_request, queue);
2669 dmacount = req->td->dmacount;
Harvey Harrison551509d2009-02-11 14:11:36 -08002670 dmacount &= cpu_to_le32 (
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 (1 << VALID_BIT)
2672 | DMA_BYTE_COUNT_MASK);
2673 if (dmacount && (dmacount & valid_bit) == 0)
2674 restart_dma (ep);
2675 }
2676 }
2677 ep->irqs++;
2678 }
2679
2680 /* NOTE: there are other PCI errors we might usefully notice.
2681 * if they appear very often, here's where to try recovering.
2682 */
2683 if (stat & PCI_ERROR_INTERRUPTS) {
2684 ERROR (dev, "pci dma error; stat %08x\n", stat);
2685 stat &= ~PCI_ERROR_INTERRUPTS;
2686 /* these are fatal errors, but "maybe" they won't
2687 * happen again ...
2688 */
2689 stop_activity (dev, dev->driver);
2690 ep0_start (dev);
2691 stat = 0;
2692 }
2693
2694 if (stat)
2695 DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
2696}
2697
David Howells7d12e782006-10-05 14:55:46 +01002698static irqreturn_t net2280_irq (int irq, void *_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699{
2700 struct net2280 *dev = _dev;
2701
Alan Stern658ad5e2006-04-14 16:44:11 -04002702 /* shared interrupt, not ours */
2703 if (!(readl(&dev->regs->irqstat0) & (1 << INTA_ASSERTED)))
2704 return IRQ_NONE;
2705
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706 spin_lock (&dev->lock);
2707
2708 /* handle disconnect, dma, and more */
2709 handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
2710
2711 /* control requests and PIO */
2712 handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
2713
2714 spin_unlock (&dev->lock);
2715
2716 return IRQ_HANDLED;
2717}
2718
2719/*-------------------------------------------------------------------------*/
2720
2721static void gadget_release (struct device *_dev)
2722{
2723 struct net2280 *dev = dev_get_drvdata (_dev);
2724
2725 kfree (dev);
2726}
2727
2728/* tear down the binding between this driver and the pci device */
2729
2730static void net2280_remove (struct pci_dev *pdev)
2731{
2732 struct net2280 *dev = pci_get_drvdata (pdev);
2733
David Brownell6bea4762006-12-05 03:15:33 -08002734 BUG_ON(dev->driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735
2736 /* then clean up the resources we allocated during probe() */
2737 net2280_led_shutdown (dev);
2738 if (dev->requests) {
2739 int i;
2740 for (i = 1; i < 5; i++) {
2741 if (!dev->ep [i].dummy)
2742 continue;
2743 pci_pool_free (dev->requests, dev->ep [i].dummy,
2744 dev->ep [i].td_dma);
2745 }
2746 pci_pool_destroy (dev->requests);
2747 }
2748 if (dev->got_irq)
2749 free_irq (pdev->irq, dev);
2750 if (dev->regs)
2751 iounmap (dev->regs);
2752 if (dev->region)
2753 release_mem_region (pci_resource_start (pdev, 0),
2754 pci_resource_len (pdev, 0));
2755 if (dev->enabled)
2756 pci_disable_device (pdev);
2757 device_unregister (&dev->gadget.dev);
2758 device_remove_file (&pdev->dev, &dev_attr_registers);
2759 pci_set_drvdata (pdev, NULL);
2760
2761 INFO (dev, "unbind\n");
2762
2763 the_controller = NULL;
2764}
2765
2766/* wrap this driver around the specified device, but
2767 * don't respond over USB until a gadget driver binds to us.
2768 */
2769
2770static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
2771{
2772 struct net2280 *dev;
2773 unsigned long resource, len;
2774 void __iomem *base = NULL;
2775 int retval, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776
2777 /* if you want to support more than one controller in a system,
2778	 * usb_gadget_{register,unregister}_driver() must change.
2779 */
2780 if (the_controller) {
2781 dev_warn (&pdev->dev, "ignoring\n");
2782 return -EBUSY;
2783 }
2784
2785 /* alloc, and start init */
Christoph Lametere94b1762006-12-06 20:33:17 -08002786 dev = kzalloc (sizeof *dev, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 if (dev == NULL){
2788 retval = -ENOMEM;
2789 goto done;
2790 }
2791
Alan Stern9fb81ce2006-04-14 16:46:28 -04002792 pci_set_drvdata (pdev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 spin_lock_init (&dev->lock);
2794 dev->pdev = pdev;
2795 dev->gadget.ops = &net2280_ops;
2796 dev->gadget.is_dualspeed = 1;
2797
2798 /* the "gadget" abstracts/virtualizes the controller */
Kay Sievers0031a062008-05-02 06:02:41 +02002799 dev_set_name(&dev->gadget.dev, "gadget");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 dev->gadget.dev.parent = &pdev->dev;
2801 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2802 dev->gadget.dev.release = gadget_release;
2803 dev->gadget.name = driver_name;
2804
2805 /* now all the pci goodies ... */
2806 if (pci_enable_device (pdev) < 0) {
David Brownell901b3d72006-09-02 03:13:45 -07002807 retval = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 goto done;
2809 }
2810 dev->enabled = 1;
2811
2812 /* BAR 0 holds all the registers
2813 * BAR 1 is 8051 memory; unused here (note erratum 0103)
2814 * BAR 2 is fifo memory; unused here
2815 */
2816 resource = pci_resource_start (pdev, 0);
2817 len = pci_resource_len (pdev, 0);
2818 if (!request_mem_region (resource, len, driver_name)) {
2819 DEBUG (dev, "controller already in use\n");
2820 retval = -EBUSY;
2821 goto done;
2822 }
2823 dev->region = 1;
2824
David Brownell901b3d72006-09-02 03:13:45 -07002825 /* FIXME provide firmware download interface to put
2826 * 8051 code into the chip, e.g. to turn on PCI PM.
2827 */
2828
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 base = ioremap_nocache (resource, len);
2830 if (base == NULL) {
2831 DEBUG (dev, "can't map memory\n");
2832 retval = -EFAULT;
2833 goto done;
2834 }
2835 dev->regs = (struct net2280_regs __iomem *) base;
2836 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
2837 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
2838 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
2839 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
2840 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
2841
2842 /* put into initial config, link up all endpoints */
2843 writel (0, &dev->usb->usbctl);
2844 usb_reset (dev);
2845 usb_reinit (dev);
2846
2847 /* irq setup after old hardware is cleaned up */
2848 if (!pdev->irq) {
2849 ERROR (dev, "No IRQ. Check PCI setup!\n");
2850 retval = -ENODEV;
2851 goto done;
2852 }
David S. Millerc6387a42006-06-20 01:21:29 -07002853
Thomas Gleixnerd54b5ca2006-07-01 19:29:44 -07002854 if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 != 0) {
David S. Millerc6387a42006-06-20 01:21:29 -07002856 ERROR (dev, "request interrupt %d failed\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 retval = -EBUSY;
2858 goto done;
2859 }
2860 dev->got_irq = 1;
2861
2862 /* DMA setup */
2863 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
2864 dev->requests = pci_pool_create ("requests", pdev,
2865 sizeof (struct net2280_dma),
2866 0 /* no alignment requirements */,
2867 0 /* or page-crossing issues */);
2868 if (!dev->requests) {
2869 DEBUG (dev, "can't get request pool\n");
2870 retval = -ENOMEM;
2871 goto done;
2872 }
2873 for (i = 1; i < 5; i++) {
2874 struct net2280_dma *td;
2875
2876 td = pci_pool_alloc (dev->requests, GFP_KERNEL,
2877 &dev->ep [i].td_dma);
2878 if (!td) {
2879 DEBUG (dev, "can't get dummy %d\n", i);
2880 retval = -ENOMEM;
2881 goto done;
2882 }
2883 td->dmacount = 0; /* not VALID */
Harvey Harrison551509d2009-02-11 14:11:36 -08002884 td->dmaaddr = cpu_to_le32 (DMA_ADDR_INVALID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 td->dmadesc = td->dmaaddr;
2886 dev->ep [i].dummy = td;
2887 }
2888
2889 /* enable lower-overhead pci memory bursts during DMA */
2890 writel ( (1 << DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE)
2891 // 256 write retries may not be enough...
2892 // | (1 << PCI_RETRY_ABORT_ENABLE)
2893 | (1 << DMA_READ_MULTIPLE_ENABLE)
2894 | (1 << DMA_READ_LINE_ENABLE)
2895 , &dev->pci->pcimstctl);
2896 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
2897 pci_set_master (pdev);
Randy Dunlap694625c2007-07-09 11:55:54 -07002898 pci_try_set_mwi (pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899
2900 /* ... also flushes any posted pci writes */
2901 dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;
2902
2903 /* done */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 INFO (dev, "%s\n", driver_desc);
David S. Millerc6387a42006-06-20 01:21:29 -07002905 INFO (dev, "irq %d, pci mem %p, chip rev %04x\n",
2906 pdev->irq, base, dev->chiprev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 INFO (dev, "version: " DRIVER_VERSION "; dma %s\n",
2908 use_dma
2909 ? (use_dma_chaining ? "chaining" : "enabled")
2910 : "disabled");
2911 the_controller = dev;
2912
Jeff Garzikb3899da2006-10-11 21:50:24 -04002913 retval = device_register (&dev->gadget.dev);
2914 if (retval) goto done;
2915 retval = device_create_file (&pdev->dev, &dev_attr_registers);
2916 if (retval) goto done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917
2918 return 0;
2919
2920done:
2921 if (dev)
2922 net2280_remove (pdev);
2923 return retval;
2924}
2925
Alan Stern2d61bde2006-05-05 16:23:42 -04002926/* make sure the board is quiescent; otherwise it will continue
2927 * generating IRQs across the upcoming reboot.
2928 */
2929
2930static void net2280_shutdown (struct pci_dev *pdev)
2931{
2932 struct net2280 *dev = pci_get_drvdata (pdev);
2933
2934 /* disable IRQs */
2935 writel (0, &dev->regs->pciirqenb0);
2936 writel (0, &dev->regs->pciirqenb1);
2937
2938 /* disable the pullup so the host will think we're gone */
2939 writel (0, &dev->usb->usbctl);
2940}
2941
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942
2943/*-------------------------------------------------------------------------*/
2944
David Brownell901b3d72006-09-02 03:13:45 -07002945static const struct pci_device_id pci_ids [] = { {
2946 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
2947 .class_mask = ~0,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 .vendor = 0x17cc,
2949 .device = 0x2280,
2950 .subvendor = PCI_ANY_ID,
2951 .subdevice = PCI_ANY_ID,
Guennadi Liakhovetski950ee4c2006-03-19 20:49:14 +01002952}, {
David Brownell901b3d72006-09-02 03:13:45 -07002953 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
2954 .class_mask = ~0,
Guennadi Liakhovetski950ee4c2006-03-19 20:49:14 +01002955 .vendor = 0x17cc,
2956 .device = 0x2282,
2957 .subvendor = PCI_ANY_ID,
2958 .subdevice = PCI_ANY_ID,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959
2960}, { /* end: all zeroes */ }
2961};
2962MODULE_DEVICE_TABLE (pci, pci_ids);
2963
2964/* pci driver glue; this is a "new style" PCI driver module */
2965static struct pci_driver net2280_pci_driver = {
2966 .name = (char *) driver_name,
2967 .id_table = pci_ids,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968
2969 .probe = net2280_probe,
2970 .remove = net2280_remove,
Alan Stern2d61bde2006-05-05 16:23:42 -04002971 .shutdown = net2280_shutdown,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972
2973 /* FIXME add power management support */
2974};
2975
2976MODULE_DESCRIPTION (DRIVER_DESC);
2977MODULE_AUTHOR ("David Brownell");
2978MODULE_LICENSE ("GPL");
2979
2980static int __init init (void)
2981{
2982 if (!use_dma)
2983 use_dma_chaining = 0;
2984 return pci_register_driver (&net2280_pci_driver);
2985}
2986module_init (init);
2987
2988static void __exit cleanup (void)
2989{
2990 pci_unregister_driver (&net2280_pci_driver);
2991}
2992module_exit (cleanup);