 1/*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
25#include <linux/gpio.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/ioport.h>
 30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/pci.h>
35#include <linux/platform_device.h>
 36#include <linux/prefetch.h>
 37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/timer.h>
40#include <linux/usb.h>
41#include <linux/usb/ch9.h>
42#include <linux/usb/gadget.h>
43
44#include <asm/byteorder.h>
 45#include <asm/unaligned.h>
46
47#include "net2272.h"
48
49#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
50
51static const char driver_name[] = "net2272";
52static const char driver_vers[] = "2006 October 17/mainline";
53static const char driver_desc[] = DRIVER_DESC;
54
55static const char ep0name[] = "ep0";
56static const char * const ep_name[] = {
57 ep0name,
58 "ep-a", "ep-b", "ep-c",
59};
60
 61#ifdef CONFIG_USB_NET2272_DMA
 62/*
 63 * use_dma: the NET2272 can use an external DMA controller.
 64 * Note that since there is no generic DMA api, some functions,
 65 * notably request_dma, start_dma, and cancel_dma will need to be
 66 * modified for your platform's particular dma controller.
 67 *
 68 * If use_dma is disabled, pio will be used instead.
 69 */
 70static bool use_dma = 0;
 71module_param(use_dma, bool, 0644);
72
73/*
74 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
75 * The NET2272 can only use dma for a single endpoint at a time.
76 * At some point this could be modified to allow either endpoint
77 * to take control of dma as it becomes available.
78 *
79 * Note that DMA should not be used on OUT endpoints unless it can
80 * be guaranteed that no short packets will arrive on an IN endpoint
81 * while the DMA operation is pending. Otherwise the OUT DMA will
82 * terminate prematurely (See NET2272 Errata 630-0213-0101)
83 */
84static ushort dma_ep = 1;
85module_param(dma_ep, ushort, 0644);
86
87/*
 88 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
89 * mode 0 == Slow DREQ mode
90 * mode 1 == Fast DREQ mode
91 * mode 2 == Burst mode
92 */
93static ushort dma_mode = 2;
94module_param(dma_mode, ushort, 0644);
95#else
96#define use_dma 0
97#define dma_ep 1
98#define dma_mode 2
99#endif
100
101/*
102 * fifo_mode: net2272 buffer configuration:
103 * mode 0 == ep-{a,b,c} 512db each
104 * mode 1 == ep-a 1k, ep-{b,c} 512db
105 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
106 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
107 */
108static ushort fifo_mode = 0;
109module_param(fifo_mode, ushort, 0644);
110
111/*
112 * enable_suspend: When enabled, the driver will respond to
113 * USB suspend requests by powering down the NET2272. Otherwise,
 114 * USB suspend requests will be ignored. This is acceptable for
 115 * self-powered devices. For bus-powered devices set this to 1.
116 */
117static ushort enable_suspend = 0;
118module_param(enable_suspend, ushort, 0644);
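/*
 * Usage sketch for the module parameters above, assuming the driver is
 * built as a module with CONFIG_USB_NET2272_DMA enabled (the values are
 * purely illustrative, not recommended settings):
 *
 *   modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 fifo_mode=2
 *
 * With these values the single DMA channel is tied to ep-a (dma_ep=1) in
 * Burst mode (dma_mode=2), and fifo_mode=2 gives ep-a and ep-b 1k buffers
 * while ep-c keeps 512.
 */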
119
120static void assert_out_naking(struct net2272_ep *ep, const char *where)
121{
122 u8 tmp;
123
124#ifndef DEBUG
125 return;
126#endif
127
128 tmp = net2272_ep_read(ep, EP_STAT0);
129 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
130 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
131 ep->ep.name, where, tmp);
132 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
133 }
134}
135#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
136
137static void stop_out_naking(struct net2272_ep *ep)
138{
139 u8 tmp = net2272_ep_read(ep, EP_STAT0);
140
141 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
142 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
143}
144
145#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
146
147static char *type_string(u8 bmAttributes)
148{
149 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
150 case USB_ENDPOINT_XFER_BULK: return "bulk";
151 case USB_ENDPOINT_XFER_ISOC: return "iso";
152 case USB_ENDPOINT_XFER_INT: return "intr";
153 default: return "control";
154 }
155}
156
157static char *buf_state_string(unsigned state)
158{
159 switch (state) {
160 case BUFF_FREE: return "free";
161 case BUFF_VALID: return "valid";
162 case BUFF_LCL: return "local";
163 case BUFF_USB: return "usb";
164 default: return "unknown";
165 }
166}
167
168static char *dma_mode_string(void)
169{
170 if (!use_dma)
171 return "PIO";
172 switch (dma_mode) {
173 case 0: return "SLOW DREQ";
174 case 1: return "FAST DREQ";
175 case 2: return "BURST";
176 default: return "invalid";
177 }
178}
179
180static void net2272_dequeue_all(struct net2272_ep *);
181static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
182static int net2272_fifo_status(struct usb_ep *);
183
184static struct usb_ep_ops net2272_ep_ops;
185
186/*---------------------------------------------------------------------------*/
187
188static int
189net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
190{
191 struct net2272 *dev;
192 struct net2272_ep *ep;
193 u32 max;
194 u8 tmp;
195 unsigned long flags;
196
197 ep = container_of(_ep, struct net2272_ep, ep);
198 if (!_ep || !desc || ep->desc || _ep->name == ep0name
199 || desc->bDescriptorType != USB_DT_ENDPOINT)
200 return -EINVAL;
201 dev = ep->dev;
202 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
203 return -ESHUTDOWN;
204
 205 max = usb_endpoint_maxp(desc) & 0x1fff;
 206
207 spin_lock_irqsave(&dev->lock, flags);
208 _ep->maxpacket = max & 0x7fff;
209 ep->desc = desc;
210
211 /* net2272_ep_reset() has already been called */
212 ep->stopped = 0;
213 ep->wedged = 0;
214
215 /* set speed-dependent max packet */
216 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
217 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
218
219 /* set type, direction, address; reset fifo counters */
220 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
221 tmp = usb_endpoint_type(desc);
222 if (usb_endpoint_xfer_bulk(desc)) {
223 /* catch some particularly blatant driver bugs */
224 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
225 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
226 spin_unlock_irqrestore(&dev->lock, flags);
227 return -ERANGE;
228 }
229 }
230 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
231 tmp <<= ENDPOINT_TYPE;
232 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
233 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
234 tmp |= (1 << ENDPOINT_ENABLE);
235
236 /* for OUT transfers, block the rx fifo until a read is posted */
237 ep->is_in = usb_endpoint_dir_in(desc);
238 if (!ep->is_in)
239 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
240
241 net2272_ep_write(ep, EP_CFG, tmp);
242
243 /* enable irqs */
244 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
245 net2272_write(dev, IRQENB0, tmp);
246
247 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
248 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
249 | net2272_ep_read(ep, EP_IRQENB);
250 net2272_ep_write(ep, EP_IRQENB, tmp);
251
252 tmp = desc->bEndpointAddress;
253 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
254 _ep->name, tmp & 0x0f, PIPEDIR(tmp),
255 type_string(desc->bmAttributes), max,
256 net2272_ep_read(ep, EP_CFG));
257
258 spin_unlock_irqrestore(&dev->lock, flags);
259 return 0;
260}
261
262static void net2272_ep_reset(struct net2272_ep *ep)
263{
264 u8 tmp;
265
266 ep->desc = NULL;
267 INIT_LIST_HEAD(&ep->queue);
268
 269 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 270 ep->ep.ops = &net2272_ep_ops;
271
272 /* disable irqs, endpoint */
273 net2272_ep_write(ep, EP_IRQENB, 0);
274
275 /* init to our chosen defaults, notably so that we NAK OUT
276 * packets until the driver queues a read.
277 */
278 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
279 net2272_ep_write(ep, EP_RSPSET, tmp);
280
281 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
282 if (ep->num != 0)
283 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
284
285 net2272_ep_write(ep, EP_RSPCLR, tmp);
286
287 /* scrub most status bits, and flush any fifo state */
288 net2272_ep_write(ep, EP_STAT0,
289 (1 << DATA_IN_TOKEN_INTERRUPT)
290 | (1 << DATA_OUT_TOKEN_INTERRUPT)
291 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
292 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
293 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
294
295 net2272_ep_write(ep, EP_STAT1,
296 (1 << TIMEOUT)
297 | (1 << USB_OUT_ACK_SENT)
298 | (1 << USB_OUT_NAK_SENT)
299 | (1 << USB_IN_ACK_RCVD)
300 | (1 << USB_IN_NAK_SENT)
301 | (1 << USB_STALL_SENT)
302 | (1 << LOCAL_OUT_ZLP)
303 | (1 << BUFFER_FLUSH));
304
 305 /* fifo size is handled separately */
306}
307
308static int net2272_disable(struct usb_ep *_ep)
309{
310 struct net2272_ep *ep;
311 unsigned long flags;
312
313 ep = container_of(_ep, struct net2272_ep, ep);
314 if (!_ep || !ep->desc || _ep->name == ep0name)
315 return -EINVAL;
316
317 spin_lock_irqsave(&ep->dev->lock, flags);
318 net2272_dequeue_all(ep);
319 net2272_ep_reset(ep);
320
321 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
322
323 spin_unlock_irqrestore(&ep->dev->lock, flags);
324 return 0;
325}
326
327/*---------------------------------------------------------------------------*/
328
329static struct usb_request *
330net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
331{
332 struct net2272_ep *ep;
333 struct net2272_request *req;
334
335 if (!_ep)
336 return NULL;
337 ep = container_of(_ep, struct net2272_ep, ep);
338
339 req = kzalloc(sizeof(*req), gfp_flags);
340 if (!req)
341 return NULL;
342
 343 INIT_LIST_HEAD(&req->queue);
344
345 return &req->req;
346}
347
348static void
349net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
350{
351 struct net2272_ep *ep;
352 struct net2272_request *req;
353
354 ep = container_of(_ep, struct net2272_ep, ep);
355 if (!_ep || !_req)
356 return;
357
358 req = container_of(_req, struct net2272_request, req);
359 WARN_ON(!list_empty(&req->queue));
360 kfree(req);
361}
362
363static void
364net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
365{
366 struct net2272 *dev;
367 unsigned stopped = ep->stopped;
368
369 if (ep->num == 0) {
370 if (ep->dev->protocol_stall) {
371 ep->stopped = 1;
372 set_halt(ep);
373 }
374 allow_status(ep);
375 }
376
377 list_del_init(&req->queue);
378
379 if (req->req.status == -EINPROGRESS)
380 req->req.status = status;
381 else
382 status = req->req.status;
383
384 dev = ep->dev;
 385 if (use_dma && ep->dma)
 386 usb_gadget_unmap_request(&dev->gadget, &req->req,
 387 ep->is_in);
 388
389 if (status && status != -ESHUTDOWN)
390 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
391 ep->ep.name, &req->req, status,
392 req->req.actual, req->req.length, req->req.buf);
393
394 /* don't modify queue heads during completion callback */
395 ep->stopped = 1;
396 spin_unlock(&dev->lock);
397 req->req.complete(&ep->ep, &req->req);
398 spin_lock(&dev->lock);
399 ep->stopped = stopped;
400}
401
402static int
403net2272_write_packet(struct net2272_ep *ep, u8 *buf,
404 struct net2272_request *req, unsigned max)
405{
406 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
407 u16 *bufp;
408 unsigned length, count;
409 u8 tmp;
410
411 length = min(req->req.length - req->req.actual, max);
412 req->req.actual += length;
413
414 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
415 ep->ep.name, req, max, length,
416 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
417
418 count = length;
419 bufp = (u16 *)buf;
420
421 while (likely(count >= 2)) {
422 /* no byte-swap required; chip endian set during init */
423 writew(*bufp++, ep_data);
424 count -= 2;
425 }
426 buf = (u8 *)bufp;
427
428 /* write final byte by placing the NET2272 into 8-bit mode */
429 if (unlikely(count)) {
430 tmp = net2272_read(ep->dev, LOCCTL);
431 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
432 writeb(*buf, ep_data);
433 net2272_write(ep->dev, LOCCTL, tmp);
434 }
435 return length;
436}
437
438/* returns: 0: still running, 1: completed, negative: errno */
439static int
440net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
441{
442 u8 *buf;
443 unsigned count, max;
444 int status;
445
446 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
447 ep->ep.name, req->req.actual, req->req.length);
448
449 /*
450 * Keep loading the endpoint until the final packet is loaded,
451 * or the endpoint buffer is full.
452 */
453 top:
454 /*
455 * Clear interrupt status
456 * - Packet Transmitted interrupt will become set again when the
457 * host successfully takes another packet
458 */
459 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
460 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
461 buf = req->req.buf + req->req.actual;
462 prefetch(buf);
463
464 /* force pagesel */
465 net2272_ep_read(ep, EP_STAT0);
466
467 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
468 (net2272_ep_read(ep, EP_AVAIL0));
469
470 if (max < ep->ep.maxpacket)
471 max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
472 | (net2272_ep_read(ep, EP_AVAIL0));
473
474 count = net2272_write_packet(ep, buf, req, max);
475 /* see if we are done */
476 if (req->req.length == req->req.actual) {
477 /* validate short or zlp packet */
478 if (count < ep->ep.maxpacket)
479 set_fifo_bytecount(ep, 0);
480 net2272_done(ep, req, 0);
481
482 if (!list_empty(&ep->queue)) {
483 req = list_entry(ep->queue.next,
484 struct net2272_request,
485 queue);
486 status = net2272_kick_dma(ep, req);
487
488 if (status < 0)
489 if ((net2272_ep_read(ep, EP_STAT0)
490 & (1 << BUFFER_EMPTY)))
491 goto top;
492 }
493 return 1;
494 }
495 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
496 }
497 return 0;
498}
499
500static void
501net2272_out_flush(struct net2272_ep *ep)
502{
503 ASSERT_OUT_NAKING(ep);
504
505 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
506 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
507 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
508}
509
510static int
511net2272_read_packet(struct net2272_ep *ep, u8 *buf,
512 struct net2272_request *req, unsigned avail)
513{
514 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
515 unsigned is_short;
516 u16 *bufp;
517
518 req->req.actual += avail;
519
520 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
521 ep->ep.name, req, avail,
522 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
523
524 is_short = (avail < ep->ep.maxpacket);
525
526 if (unlikely(avail == 0)) {
527 /* remove any zlp from the buffer */
528 (void)readw(ep_data);
529 return is_short;
530 }
531
532 /* Ensure we get the final byte */
533 if (unlikely(avail % 2))
534 avail++;
535 bufp = (u16 *)buf;
536
537 do {
538 *bufp++ = readw(ep_data);
539 avail -= 2;
540 } while (avail);
541
542 /*
 543 * To avoid a false endpoint-available race condition, EP_STAT0 must be
 544 * read twice in the case of a short transfer
545 */
546 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
547 net2272_ep_read(ep, EP_STAT0);
548
549 return is_short;
550}
551
552static int
553net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
554{
555 u8 *buf;
556 unsigned is_short;
557 int count;
558 int tmp;
559 int cleanup = 0;
560 int status = -1;
561
562 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
563 ep->ep.name, req->req.actual, req->req.length);
564
565 top:
566 do {
567 buf = req->req.buf + req->req.actual;
568 prefetchw(buf);
569
570 count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
571 | net2272_ep_read(ep, EP_AVAIL0);
572
573 net2272_ep_write(ep, EP_STAT0,
574 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
575 (1 << DATA_PACKET_RECEIVED_INTERRUPT));
576
577 tmp = req->req.length - req->req.actual;
578
579 if (count > tmp) {
580 if ((tmp % ep->ep.maxpacket) != 0) {
581 dev_err(ep->dev->dev,
582 "%s out fifo %d bytes, expected %d\n",
583 ep->ep.name, count, tmp);
584 cleanup = 1;
585 }
586 count = (tmp > 0) ? tmp : 0;
587 }
588
589 is_short = net2272_read_packet(ep, buf, req, count);
590
591 /* completion */
592 if (unlikely(cleanup || is_short ||
593 ((req->req.actual == req->req.length)
594 && !req->req.zero))) {
595
596 if (cleanup) {
597 net2272_out_flush(ep);
598 net2272_done(ep, req, -EOVERFLOW);
599 } else
600 net2272_done(ep, req, 0);
601
602 /* re-initialize endpoint transfer registers
603 * otherwise they may result in erroneous pre-validation
604 * for subsequent control reads
605 */
606 if (unlikely(ep->num == 0)) {
607 net2272_ep_write(ep, EP_TRANSFER2, 0);
608 net2272_ep_write(ep, EP_TRANSFER1, 0);
609 net2272_ep_write(ep, EP_TRANSFER0, 0);
610 }
611
612 if (!list_empty(&ep->queue)) {
613 req = list_entry(ep->queue.next,
614 struct net2272_request, queue);
615 status = net2272_kick_dma(ep, req);
616 if ((status < 0) &&
617 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
618 goto top;
619 }
620 return 1;
621 }
622 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
623
624 return 0;
625}
626
627static void
628net2272_pio_advance(struct net2272_ep *ep)
629{
630 struct net2272_request *req;
631
632 if (unlikely(list_empty(&ep->queue)))
633 return;
634
635 req = list_entry(ep->queue.next, struct net2272_request, queue);
636 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
637}
638
639/* returns 0 on success, else negative errno */
640static int
641net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
642 unsigned len, unsigned dir)
643{
644 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
645 ep, buf, len, dir);
646
647 /* The NET2272 only supports a single dma channel */
648 if (dev->dma_busy)
649 return -EBUSY;
650 /*
651 * EP_TRANSFER (used to determine the number of bytes received
652 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
653 */
654 if ((dir == 1) && (len > 0x1000000))
655 return -EINVAL;
656
657 dev->dma_busy = 1;
658
659 /* initialize platform's dma */
660#ifdef CONFIG_PCI
661 /* NET2272 addr, buffer addr, length, etc. */
662 switch (dev->dev_id) {
663 case PCI_DEVICE_ID_RDK1:
664 /* Setup PLX 9054 DMA mode */
665 writel((1 << LOCAL_BUS_WIDTH) |
666 (1 << TA_READY_INPUT_ENABLE) |
667 (0 << LOCAL_BURST_ENABLE) |
668 (1 << DONE_INTERRUPT_ENABLE) |
669 (1 << LOCAL_ADDRESSING_MODE) |
670 (1 << DEMAND_MODE) |
671 (1 << DMA_EOT_ENABLE) |
672 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
673 (1 << DMA_CHANNEL_INTERRUPT_SELECT),
674 dev->rdk1.plx9054_base_addr + DMAMODE0);
675
676 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
677 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
678 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
679 writel((dir << DIRECTION_OF_TRANSFER) |
680 (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
681 dev->rdk1.plx9054_base_addr + DMADPR0);
682 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
683 readl(dev->rdk1.plx9054_base_addr + INTCSR),
684 dev->rdk1.plx9054_base_addr + INTCSR);
685
686 break;
687 }
688#endif
689
690 net2272_write(dev, DMAREQ,
691 (0 << DMA_BUFFER_VALID) |
692 (1 << DMA_REQUEST_ENABLE) |
693 (1 << DMA_CONTROL_DACK) |
694 (dev->dma_eot_polarity << EOT_POLARITY) |
695 (dev->dma_dack_polarity << DACK_POLARITY) |
696 (dev->dma_dreq_polarity << DREQ_POLARITY) |
697 ((ep >> 1) << DMA_ENDPOINT_SELECT));
698
699 (void) net2272_read(dev, SCRATCH);
700
701 return 0;
702}
703
704static void
705net2272_start_dma(struct net2272 *dev)
706{
707 /* start platform's dma controller */
708#ifdef CONFIG_PCI
709 switch (dev->dev_id) {
710 case PCI_DEVICE_ID_RDK1:
711 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
712 dev->rdk1.plx9054_base_addr + DMACSR0);
713 break;
714 }
715#endif
716}
717
718/* returns 0 on success, else negative errno */
719static int
720net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
721{
722 unsigned size;
723 u8 tmp;
724
725 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
726 return -EINVAL;
727
728 /* don't use dma for odd-length transfers
729 * otherwise, we'd need to deal with the last byte with pio
730 */
731 if (req->req.length & 1)
732 return -EINVAL;
733
 734 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
 735 ep->ep.name, req, (unsigned long long) req->req.dma);
 736
737 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
738
739 /* The NET2272 can only use DMA on one endpoint at a time */
740 if (ep->dev->dma_busy)
741 return -EBUSY;
742
743 /* Make sure we only DMA an even number of bytes (we'll use
744 * pio to complete the transfer)
745 */
746 size = req->req.length;
747 size &= ~1;
748
749 /* device-to-host transfer */
750 if (ep->is_in) {
751 /* initialize platform's dma controller */
752 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
753 /* unable to obtain DMA channel; return error and use pio mode */
754 return -EBUSY;
755 req->req.actual += size;
756
757 /* host-to-device transfer */
758 } else {
759 tmp = net2272_ep_read(ep, EP_STAT0);
760
761 /* initialize platform's dma controller */
762 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
763 /* unable to obtain DMA channel; return error and use pio mode */
764 return -EBUSY;
765
766 if (!(tmp & (1 << BUFFER_EMPTY)))
767 ep->not_empty = 1;
768 else
769 ep->not_empty = 0;
770
771
772 /* allow the endpoint's buffer to fill */
773 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
774
775 /* this transfer completed and data's already in the fifo
776 * return error so pio gets used.
777 */
778 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
779
780 /* deassert dreq */
781 net2272_write(ep->dev, DMAREQ,
782 (0 << DMA_BUFFER_VALID) |
783 (0 << DMA_REQUEST_ENABLE) |
784 (1 << DMA_CONTROL_DACK) |
785 (ep->dev->dma_eot_polarity << EOT_POLARITY) |
786 (ep->dev->dma_dack_polarity << DACK_POLARITY) |
787 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
788 ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
789
790 return -EBUSY;
791 }
792 }
793
794 /* Don't use per-packet interrupts: use dma interrupts only */
795 net2272_ep_write(ep, EP_IRQENB, 0);
796
797 net2272_start_dma(ep->dev);
798
799 return 0;
800}
801
802static void net2272_cancel_dma(struct net2272 *dev)
803{
804#ifdef CONFIG_PCI
805 switch (dev->dev_id) {
806 case PCI_DEVICE_ID_RDK1:
807 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
808 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
809 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
810 (1 << CHANNEL_DONE)))
 811 continue; /* wait for dma to stabilize */
812
813 /* dma abort generates an interrupt */
814 writeb(1 << CHANNEL_CLEAR_INTERRUPT,
815 dev->rdk1.plx9054_base_addr + DMACSR0);
816 break;
817 }
818#endif
819
820 dev->dma_busy = 0;
821}
822
823/*---------------------------------------------------------------------------*/
824
825static int
826net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
827{
828 struct net2272_request *req;
829 struct net2272_ep *ep;
830 struct net2272 *dev;
831 unsigned long flags;
832 int status = -1;
833 u8 s;
834
835 req = container_of(_req, struct net2272_request, req);
836 if (!_req || !_req->complete || !_req->buf
837 || !list_empty(&req->queue))
838 return -EINVAL;
839 ep = container_of(_ep, struct net2272_ep, ep);
840 if (!_ep || (!ep->desc && ep->num != 0))
841 return -EINVAL;
842 dev = ep->dev;
843 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
844 return -ESHUTDOWN;
845
846 /* set up dma mapping in case the caller didn't */
 847 if (use_dma && ep->dma) {
 848 status = usb_gadget_map_request(&dev->gadget, _req,
 849 ep->is_in);
 850 if (status)
 851 return status;
 852 }
 853
 854 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
 855 _ep->name, _req, _req->length, _req->buf,
 856 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
 857
858 spin_lock_irqsave(&dev->lock, flags);
859
860 _req->status = -EINPROGRESS;
861 _req->actual = 0;
862
863 /* kickstart this i/o queue? */
864 if (list_empty(&ep->queue) && !ep->stopped) {
865 /* maybe there's no control data, just status ack */
866 if (ep->num == 0 && _req->length == 0) {
867 net2272_done(ep, req, 0);
868 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
869 goto done;
870 }
871
872 /* Return zlp, don't let it block subsequent packets */
873 s = net2272_ep_read(ep, EP_STAT0);
874 if (s & (1 << BUFFER_EMPTY)) {
875 /* Buffer is empty check for a blocking zlp, handle it */
876 if ((s & (1 << NAK_OUT_PACKETS)) &&
877 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
878 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
879 /*
880 * Request is going to terminate with a short packet ...
881 * hope the client is ready for it!
882 */
883 status = net2272_read_fifo(ep, req);
884 /* clear short packet naking */
885 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
886 goto done;
887 }
888 }
889
890 /* try dma first */
891 status = net2272_kick_dma(ep, req);
892
893 if (status < 0) {
894 /* dma failed (most likely in use by another endpoint)
895 * fallback to pio
896 */
897 status = 0;
898
899 if (ep->is_in)
900 status = net2272_write_fifo(ep, req);
901 else {
902 s = net2272_ep_read(ep, EP_STAT0);
903 if ((s & (1 << BUFFER_EMPTY)) == 0)
904 status = net2272_read_fifo(ep, req);
905 }
906
907 if (unlikely(status != 0)) {
908 if (status > 0)
909 status = 0;
910 req = NULL;
911 }
912 }
913 }
 914 if (likely(req))
 915 list_add_tail(&req->queue, &ep->queue);
916
917 if (likely(!list_empty(&ep->queue)))
918 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
919 done:
920 spin_unlock_irqrestore(&dev->lock, flags);
921
922 return 0;
923}
924
925/* dequeue ALL requests */
926static void
927net2272_dequeue_all(struct net2272_ep *ep)
928{
929 struct net2272_request *req;
930
931 /* called with spinlock held */
932 ep->stopped = 1;
933
934 while (!list_empty(&ep->queue)) {
935 req = list_entry(ep->queue.next,
936 struct net2272_request,
937 queue);
938 net2272_done(ep, req, -ESHUTDOWN);
939 }
940}
941
942/* dequeue JUST ONE request */
943static int
944net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
945{
946 struct net2272_ep *ep;
947 struct net2272_request *req;
948 unsigned long flags;
949 int stopped;
950
951 ep = container_of(_ep, struct net2272_ep, ep);
952 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
953 return -EINVAL;
954
955 spin_lock_irqsave(&ep->dev->lock, flags);
956 stopped = ep->stopped;
957 ep->stopped = 1;
958
959 /* make sure it's still queued on this endpoint */
960 list_for_each_entry(req, &ep->queue, queue) {
961 if (&req->req == _req)
962 break;
963 }
964 if (&req->req != _req) {
965 spin_unlock_irqrestore(&ep->dev->lock, flags);
966 return -EINVAL;
967 }
968
969 /* queue head may be partially complete */
970 if (ep->queue.next == &req->queue) {
971 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
972 net2272_done(ep, req, -ECONNRESET);
973 }
974 req = NULL;
975 ep->stopped = stopped;
976
977 spin_unlock_irqrestore(&ep->dev->lock, flags);
978 return 0;
979}
980
981/*---------------------------------------------------------------------------*/
982
983static int
984net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
985{
986 struct net2272_ep *ep;
987 unsigned long flags;
988 int ret = 0;
989
990 ep = container_of(_ep, struct net2272_ep, ep);
991 if (!_ep || (!ep->desc && ep->num != 0))
992 return -EINVAL;
993 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
994 return -ESHUTDOWN;
995 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
996 return -EINVAL;
997
998 spin_lock_irqsave(&ep->dev->lock, flags);
999 if (!list_empty(&ep->queue))
1000 ret = -EAGAIN;
1001 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1002 ret = -EAGAIN;
1003 else {
1004 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1005 value ? "set" : "clear",
1006 wedged ? "wedge" : "halt");
1007 /* set/clear */
1008 if (value) {
1009 if (ep->num == 0)
1010 ep->dev->protocol_stall = 1;
1011 else
1012 set_halt(ep);
1013 if (wedged)
1014 ep->wedged = 1;
1015 } else {
1016 clear_halt(ep);
1017 ep->wedged = 0;
1018 }
1019 }
1020 spin_unlock_irqrestore(&ep->dev->lock, flags);
1021
1022 return ret;
1023}
1024
1025static int
1026net2272_set_halt(struct usb_ep *_ep, int value)
1027{
1028 return net2272_set_halt_and_wedge(_ep, value, 0);
1029}
1030
1031static int
1032net2272_set_wedge(struct usb_ep *_ep)
1033{
1034 if (!_ep || _ep->name == ep0name)
1035 return -EINVAL;
1036 return net2272_set_halt_and_wedge(_ep, 1, 1);
1037}
1038
1039static int
1040net2272_fifo_status(struct usb_ep *_ep)
1041{
1042 struct net2272_ep *ep;
1043 u16 avail;
1044
1045 ep = container_of(_ep, struct net2272_ep, ep);
1046 if (!_ep || (!ep->desc && ep->num != 0))
1047 return -ENODEV;
1048 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1049 return -ESHUTDOWN;
1050
1051 avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1052 avail |= net2272_ep_read(ep, EP_AVAIL0);
1053 if (avail > ep->fifo_size)
1054 return -EOVERFLOW;
1055 if (ep->is_in)
1056 avail = ep->fifo_size - avail;
1057 return avail;
1058}
1059
1060static void
1061net2272_fifo_flush(struct usb_ep *_ep)
1062{
1063 struct net2272_ep *ep;
1064
1065 ep = container_of(_ep, struct net2272_ep, ep);
1066 if (!_ep || (!ep->desc && ep->num != 0))
1067 return;
1068 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1069 return;
1070
1071 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1072}
1073
1074static struct usb_ep_ops net2272_ep_ops = {
1075 .enable = net2272_enable,
1076 .disable = net2272_disable,
1077
1078 .alloc_request = net2272_alloc_request,
1079 .free_request = net2272_free_request,
1080
1081 .queue = net2272_queue,
1082 .dequeue = net2272_dequeue,
1083
1084 .set_halt = net2272_set_halt,
1085 .set_wedge = net2272_set_wedge,
1086 .fifo_status = net2272_fifo_status,
1087 .fifo_flush = net2272_fifo_flush,
1088};
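/*
 * A minimal sketch of how a gadget (function) driver reaches these ops
 * through the generic gadget API once bound to this UDC; the buffer,
 * length and completion-callback names below are illustrative only:
 *
 *   struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *   req->buf      = buf;
 *   req->length   = len;
 *   req->complete = my_complete;         completion runs via net2272_done()
 *   usb_ep_queue(ep, req, GFP_KERNEL);   dispatches to net2272_queue()
 */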
1089
1090/*---------------------------------------------------------------------------*/
1091
1092static int
1093net2272_get_frame(struct usb_gadget *_gadget)
1094{
1095 struct net2272 *dev;
1096 unsigned long flags;
1097 u16 ret;
1098
1099 if (!_gadget)
1100 return -ENODEV;
1101 dev = container_of(_gadget, struct net2272, gadget);
1102 spin_lock_irqsave(&dev->lock, flags);
1103
1104 ret = net2272_read(dev, FRAME1) << 8;
1105 ret |= net2272_read(dev, FRAME0);
1106
1107 spin_unlock_irqrestore(&dev->lock, flags);
1108 return ret;
1109}
1110
1111static int
1112net2272_wakeup(struct usb_gadget *_gadget)
1113{
1114 struct net2272 *dev;
1115 u8 tmp;
1116 unsigned long flags;
1117
1118 if (!_gadget)
1119 return 0;
1120 dev = container_of(_gadget, struct net2272, gadget);
1121
1122 spin_lock_irqsave(&dev->lock, flags);
1123 tmp = net2272_read(dev, USBCTL0);
1124 if (tmp & (1 << IO_WAKEUP_ENABLE))
1125 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1126
1127 spin_unlock_irqrestore(&dev->lock, flags);
1128
1129 return 0;
1130}
1131
1132static int
1133net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1134{
1135 struct net2272 *dev;
1136
1137 if (!_gadget)
1138 return -ENODEV;
1139 dev = container_of(_gadget, struct net2272, gadget);
1140
1141 dev->is_selfpowered = value;
1142
1143 return 0;
1144}
1145
1146static int
1147net2272_pullup(struct usb_gadget *_gadget, int is_on)
1148{
1149 struct net2272 *dev;
1150 u8 tmp;
1151 unsigned long flags;
1152
1153 if (!_gadget)
1154 return -ENODEV;
1155 dev = container_of(_gadget, struct net2272, gadget);
1156
1157 spin_lock_irqsave(&dev->lock, flags);
1158 tmp = net2272_read(dev, USBCTL0);
1159 dev->softconnect = (is_on != 0);
1160 if (is_on)
1161 tmp |= (1 << USB_DETECT_ENABLE);
1162 else
1163 tmp &= ~(1 << USB_DETECT_ENABLE);
1164 net2272_write(dev, USBCTL0, tmp);
1165 spin_unlock_irqrestore(&dev->lock, flags);
1166
1167 return 0;
1168}
1169
 1170static int net2272_start(struct usb_gadget *_gadget,
1171 struct usb_gadget_driver *driver);
1172static int net2272_stop(struct usb_gadget *_gadget,
1173 struct usb_gadget_driver *driver);
 1174
 1175static const struct usb_gadget_ops net2272_ops = {
 1176 .get_frame = net2272_get_frame,
 1177 .wakeup = net2272_wakeup,
 1178 .set_selfpowered = net2272_set_selfpowered,
 1179 .pullup = net2272_pullup,
 1180 .udc_start = net2272_start,
 1181 .udc_stop = net2272_stop,
 1182};
1183
1184/*---------------------------------------------------------------------------*/
1185
1186static ssize_t
 1187registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
 1188{
1189 struct net2272 *dev;
1190 char *next;
1191 unsigned size, t;
1192 unsigned long flags;
1193 u8 t1, t2;
1194 int i;
1195 const char *s;
1196
1197 dev = dev_get_drvdata(_dev);
1198 next = buf;
1199 size = PAGE_SIZE;
1200 spin_lock_irqsave(&dev->lock, flags);
1201
1202 if (dev->driver)
1203 s = dev->driver->driver.name;
1204 else
1205 s = "(none)";
1206
1207 /* Main Control Registers */
1208 t = scnprintf(next, size, "%s version %s,"
1209 "chiprev %02x, locctl %02x\n"
1210 "irqenb0 %02x irqenb1 %02x "
1211 "irqstat0 %02x irqstat1 %02x\n",
1212 driver_name, driver_vers, dev->chiprev,
1213 net2272_read(dev, LOCCTL),
1214 net2272_read(dev, IRQENB0),
1215 net2272_read(dev, IRQENB1),
1216 net2272_read(dev, IRQSTAT0),
1217 net2272_read(dev, IRQSTAT1));
1218 size -= t;
1219 next += t;
1220
1221 /* DMA */
1222 t1 = net2272_read(dev, DMAREQ);
1223 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1224 t1, ep_name[(t1 & 0x01) + 1],
1225 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1226 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1227 t1 & (1 << DMA_REQUEST) ? "req " : "",
1228 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1229 size -= t;
1230 next += t;
1231
1232 /* USB Control Registers */
1233 t1 = net2272_read(dev, USBCTL1);
1234 if (t1 & (1 << VBUS_PIN)) {
1235 if (t1 & (1 << USB_HIGH_SPEED))
1236 s = "high speed";
1237 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1238 s = "powered";
1239 else
1240 s = "full speed";
1241 } else
1242 s = "not attached";
1243 t = scnprintf(next, size,
1244 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1245 net2272_read(dev, USBCTL0), t1,
1246 net2272_read(dev, OURADDR), s);
1247 size -= t;
1248 next += t;
1249
1250 /* Endpoint Registers */
1251 for (i = 0; i < 4; ++i) {
1252 struct net2272_ep *ep;
1253
1254 ep = &dev->ep[i];
1255 if (i && !ep->desc)
1256 continue;
1257
1258 t1 = net2272_ep_read(ep, EP_CFG);
1259 t2 = net2272_ep_read(ep, EP_RSPSET);
1260 t = scnprintf(next, size,
1261 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1262 "irqenb %02x\n",
1263 ep->ep.name, t1, t2,
1264 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1265 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1266 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1267 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1268 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1269 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1270 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1271 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1272 net2272_ep_read(ep, EP_IRQENB));
1273 size -= t;
1274 next += t;
1275
1276 t = scnprintf(next, size,
1277 "\tstat0 %02x stat1 %02x avail %04x "
1278 "(ep%d%s-%s)%s\n",
1279 net2272_ep_read(ep, EP_STAT0),
1280 net2272_ep_read(ep, EP_STAT1),
1281 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1282 t1 & 0x0f,
1283 ep->is_in ? "in" : "out",
1284 type_string(t1 >> 5),
1285 ep->stopped ? "*" : "");
1286 size -= t;
1287 next += t;
1288
1289 t = scnprintf(next, size,
1290 "\tep_transfer %06x\n",
1291 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1292 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1293 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1294 size -= t;
1295 next += t;
1296
1297 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1298 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1299 t = scnprintf(next, size,
1300 "\tbuf-a %s buf-b %s\n",
1301 buf_state_string(t1),
1302 buf_state_string(t2));
1303 size -= t;
1304 next += t;
1305 }
1306
1307 spin_unlock_irqrestore(&dev->lock, flags);
1308
1309 return PAGE_SIZE - size;
1310}
 1311static DEVICE_ATTR_RO(registers);
 1312
1313/*---------------------------------------------------------------------------*/
1314
1315static void
1316net2272_set_fifo_mode(struct net2272 *dev, int mode)
1317{
1318 u8 tmp;
1319
1320 tmp = net2272_read(dev, LOCCTL) & 0x3f;
1321 tmp |= (mode << 6);
1322 net2272_write(dev, LOCCTL, tmp);
1323
1324 INIT_LIST_HEAD(&dev->gadget.ep_list);
1325
1326 /* always ep-a, ep-c ... maybe not ep-b */
1327 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1328
1329 switch (mode) {
1330 case 0:
1331 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1332 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1333 break;
1334 case 1:
1335 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1336 dev->ep[1].fifo_size = 1024;
1337 dev->ep[2].fifo_size = 512;
1338 break;
1339 case 2:
1340 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1341 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1342 break;
1343 case 3:
1344 dev->ep[1].fifo_size = 1024;
1345 break;
1346 }
1347
1348 /* ep-c is always 2 512 byte buffers */
1349 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1350 dev->ep[3].fifo_size = 512;
1351}
1352
1353/*---------------------------------------------------------------------------*/
1354
 1355static void
1356net2272_usb_reset(struct net2272 *dev)
1357{
1358 dev->gadget.speed = USB_SPEED_UNKNOWN;
1359
1360 net2272_cancel_dma(dev);
1361
1362 net2272_write(dev, IRQENB0, 0);
1363 net2272_write(dev, IRQENB1, 0);
1364
1365 /* clear irq state */
1366 net2272_write(dev, IRQSTAT0, 0xff);
1367 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1368
1369 net2272_write(dev, DMAREQ,
1370 (0 << DMA_BUFFER_VALID) |
1371 (0 << DMA_REQUEST_ENABLE) |
1372 (1 << DMA_CONTROL_DACK) |
1373 (dev->dma_eot_polarity << EOT_POLARITY) |
1374 (dev->dma_dack_polarity << DACK_POLARITY) |
1375 (dev->dma_dreq_polarity << DREQ_POLARITY) |
1376 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1377
1378 net2272_cancel_dma(dev);
1379 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1380
 1381 /* Set the NET2272 ep fifo data width to 16-bit mode. For correct byte swapping,
 1382 * note that the higher-level gadget drivers are expected to convert data to little endian;
 1383 * enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1384 */
1385 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1386 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1387}
1388
1389static void
1390net2272_usb_reinit(struct net2272 *dev)
1391{
1392 int i;
1393
1394 /* basic endpoint init */
1395 for (i = 0; i < 4; ++i) {
1396 struct net2272_ep *ep = &dev->ep[i];
1397
1398 ep->ep.name = ep_name[i];
1399 ep->dev = dev;
1400 ep->num = i;
1401 ep->not_empty = 0;
1402
1403 if (use_dma && ep->num == dma_ep)
1404 ep->dma = 1;
1405
1406 if (i > 0 && i <= 3)
1407 ep->fifo_size = 512;
1408 else
1409 ep->fifo_size = 64;
1410 net2272_ep_reset(ep);
1411 }
 1412 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
 1413
1414 dev->gadget.ep0 = &dev->ep[0].ep;
1415 dev->ep[0].stopped = 0;
1416 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1417}
1418
1419static void
1420net2272_ep0_start(struct net2272 *dev)
1421{
1422 struct net2272_ep *ep0 = &dev->ep[0];
1423
1424 net2272_ep_write(ep0, EP_RSPSET,
1425 (1 << NAK_OUT_PACKETS_MODE) |
1426 (1 << ALT_NAK_OUT_PACKETS));
1427 net2272_ep_write(ep0, EP_RSPCLR,
1428 (1 << HIDE_STATUS_PHASE) |
1429 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1430 net2272_write(dev, USBCTL0,
1431 (dev->softconnect << USB_DETECT_ENABLE) |
1432 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1433 (1 << IO_WAKEUP_ENABLE));
1434 net2272_write(dev, IRQENB0,
1435 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1436 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1437 (1 << DMA_DONE_INTERRUPT_ENABLE));
1438 net2272_write(dev, IRQENB1,
1439 (1 << VBUS_INTERRUPT_ENABLE) |
1440 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1441 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1442}
1443
1444/* when a driver is successfully registered, it will receive
1445 * control requests including set_configuration(), which enables
1446 * non-control requests. then usb traffic follows until a
1447 * disconnect is reported. then a host may connect again, or
1448 * the driver might get unbound.
1449 */
 1450static int net2272_start(struct usb_gadget *_gadget,
 1451 struct usb_gadget_driver *driver)
 1452{
 1453 struct net2272 *dev;
 1454 unsigned i;
 1455
 1456 if (!driver || !driver->unbind || !driver->setup ||
 1457 driver->max_speed != USB_SPEED_HIGH)
 1458 return -EINVAL;
 1459
 1460 dev = container_of(_gadget, struct net2272, gadget);
 1461
1462 for (i = 0; i < 4; ++i)
1463 dev->ep[i].irqs = 0;
1464 /* hook up the driver ... */
1465 dev->softconnect = 1;
1466 driver->driver.bus = NULL;
1467 dev->driver = driver;
 1468
1469 /* ... then enable host detection and ep0; and we're ready
1470 * for set_configuration as well as eventual disconnect.
1471 */
1472 net2272_ep0_start(dev);
1473
1474 dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
1475
1476 return 0;
1477}
 1478
1479static void
1480stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1481{
1482 int i;
1483
1484 /* don't disconnect if it's not connected */
1485 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1486 driver = NULL;
1487
1488 /* stop hardware; prevent new request submissions;
1489 * and kill any outstanding requests.
1490 */
1491 net2272_usb_reset(dev);
1492 for (i = 0; i < 4; ++i)
1493 net2272_dequeue_all(&dev->ep[i]);
1494
 1495 /* report disconnect; the driver is already quiesced */
1496 if (driver) {
1497 spin_unlock(&dev->lock);
1498 driver->disconnect(&dev->gadget);
1499 spin_lock(&dev->lock);
1500 }
1501
 1502 net2272_usb_reinit(dev);
1503}
1504
 1505static int net2272_stop(struct usb_gadget *_gadget,
 1506 struct usb_gadget_driver *driver)
 1507{
 1508 struct net2272 *dev;
 1509 unsigned long flags;
 1510
 1511 dev = container_of(_gadget, struct net2272, gadget);
 1512
1513 spin_lock_irqsave(&dev->lock, flags);
1514 stop_activity(dev, driver);
1515 spin_unlock_irqrestore(&dev->lock, flags);
1516
 1517 dev->driver = NULL;
1518
1519 dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
1520 return 0;
1521}
 1522
1523/*---------------------------------------------------------------------------*/
1524/* handle ep-a/ep-b dma completions */
1525static void
1526net2272_handle_dma(struct net2272_ep *ep)
1527{
1528 struct net2272_request *req;
1529 unsigned len;
1530 int status;
1531
1532 if (!list_empty(&ep->queue))
1533 req = list_entry(ep->queue.next,
1534 struct net2272_request, queue);
1535 else
1536 req = NULL;
1537
1538 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1539
1540 /* Ensure DREQ is de-asserted */
1541 net2272_write(ep->dev, DMAREQ,
1542 (0 << DMA_BUFFER_VALID)
1543 | (0 << DMA_REQUEST_ENABLE)
1544 | (1 << DMA_CONTROL_DACK)
1545 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1546 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1547 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
 1548 | (ep->dma << DMA_ENDPOINT_SELECT));
 1549
1550 ep->dev->dma_busy = 0;
1551
1552 net2272_ep_write(ep, EP_IRQENB,
1553 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1554 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1555 | net2272_ep_read(ep, EP_IRQENB));
1556
1557 /* device-to-host transfer completed */
1558 if (ep->is_in) {
1559 /* validate a short packet or zlp if necessary */
1560 if ((req->req.length % ep->ep.maxpacket != 0) ||
1561 req->req.zero)
1562 set_fifo_bytecount(ep, 0);
1563
1564 net2272_done(ep, req, 0);
1565 if (!list_empty(&ep->queue)) {
1566 req = list_entry(ep->queue.next,
1567 struct net2272_request, queue);
1568 status = net2272_kick_dma(ep, req);
1569 if (status < 0)
1570 net2272_pio_advance(ep);
1571 }
1572
1573 /* host-to-device transfer completed */
1574 } else {
1575 /* terminated with a short packet? */
1576 if (net2272_read(ep->dev, IRQSTAT0) &
1577 (1 << DMA_DONE_INTERRUPT)) {
1578 /* abort system dma */
1579 net2272_cancel_dma(ep->dev);
1580 }
1581
1582 /* EP_TRANSFER will contain the number of bytes
1583 * actually received.
1584 * NOTE: There is no overflow detection on EP_TRANSFER:
1585 * We can't deal with transfers larger than 2^24 bytes!
1586 */
1587 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1588 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1589 | (net2272_ep_read(ep, EP_TRANSFER0));
1590
1591 if (ep->not_empty)
1592 len += 4;
1593
1594 req->req.actual += len;
1595
1596 /* get any remaining data */
1597 net2272_pio_advance(ep);
1598 }
1599}
1600
1601/*---------------------------------------------------------------------------*/
1602
1603static void
1604net2272_handle_ep(struct net2272_ep *ep)
1605{
1606 struct net2272_request *req;
1607 u8 stat0, stat1;
1608
1609 if (!list_empty(&ep->queue))
1610 req = list_entry(ep->queue.next,
1611 struct net2272_request, queue);
1612 else
1613 req = NULL;
1614
1615 /* ack all, and handle what we care about */
1616 stat0 = net2272_ep_read(ep, EP_STAT0);
1617 stat1 = net2272_ep_read(ep, EP_STAT1);
1618 ep->irqs++;
1619
1620 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
 1621 ep->ep.name, stat0, stat1, req ? &req->req : NULL);
 1622
1623 net2272_ep_write(ep, EP_STAT0, stat0 &
1624 ~((1 << NAK_OUT_PACKETS)
1625 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1626 net2272_ep_write(ep, EP_STAT1, stat1);
1627
1628 /* data packet(s) received (in the fifo, OUT)
1629 * direction must be validated, otherwise control read status phase
1630 * could be interpreted as a valid packet
1631 */
1632 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1633 net2272_pio_advance(ep);
1634 /* data packet(s) transmitted (IN) */
1635 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1636 net2272_pio_advance(ep);
1637}
1638
1639static struct net2272_ep *
1640net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1641{
1642 struct net2272_ep *ep;
1643
1644 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1645 return &dev->ep[0];
1646
1647 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1648 u8 bEndpointAddress;
1649
1650 if (!ep->desc)
1651 continue;
1652 bEndpointAddress = ep->desc->bEndpointAddress;
1653 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1654 continue;
1655 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1656 return ep;
1657 }
1658 return NULL;
1659}
1660
1661/*
1662 * USB Test Packet:
1663 * JKJKJKJK * 9
1664 * JJKKJJKK * 8
1665 * JJJJKKKK * 8
1666 * JJJJJJJKKKKKKK * 8
1667 * JJJJJJJK * 8
1668 * {JKKKKKKK * 10}, JK
1669 */
1670static const u8 net2272_test_packet[] = {
1671 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1672 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1673 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1674 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1675 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1676 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1677};
1678
1679static void
1680net2272_set_test_mode(struct net2272 *dev, int mode)
1681{
1682 int i;
1683
1684 /* Disable all net2272 interrupts:
1685 * Nothing but a power cycle should stop the test.
1686 */
1687 net2272_write(dev, IRQENB0, 0x00);
1688 net2272_write(dev, IRQENB1, 0x00);
1689
 1690 /* Force transceiver to high-speed */
1691 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1692
1693 net2272_write(dev, PAGESEL, 0);
1694 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1695 net2272_write(dev, EP_RSPCLR,
1696 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1697 | (1 << HIDE_STATUS_PHASE));
1698 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1699 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1700
1701 /* wait for status phase to complete */
1702 while (!(net2272_read(dev, EP_STAT0) &
1703 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1704 ;
1705
1706 /* Enable test mode */
1707 net2272_write(dev, USBTEST, mode);
1708
1709 /* load test packet */
1710 if (mode == TEST_PACKET) {
1711 /* switch to 8 bit mode */
1712 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1713 ~(1 << DATA_WIDTH));
1714
1715 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1716 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1717
1718 /* Validate test packet */
1719 net2272_write(dev, EP_TRANSFER0, 0);
1720 }
1721}
1722
1723static void
1724net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1725{
1726 struct net2272_ep *ep;
1727 u8 num, scratch;
1728
1729 /* starting a control request? */
1730 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1731 union {
1732 u8 raw[8];
1733 struct usb_ctrlrequest r;
1734 } u;
1735 int tmp = 0;
1736 struct net2272_request *req;
1737
1738 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1739 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1740 dev->gadget.speed = USB_SPEED_HIGH;
1741 else
1742 dev->gadget.speed = USB_SPEED_FULL;
 1743 dev_dbg(dev->dev, "%s\n",
 1744 usb_speed_string(dev->gadget.speed));
 1745 }
1746
1747 ep = &dev->ep[0];
1748 ep->irqs++;
1749
1750 /* make sure any leftover interrupt state is cleared */
1751 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1752 while (!list_empty(&ep->queue)) {
1753 req = list_entry(ep->queue.next,
1754 struct net2272_request, queue);
1755 net2272_done(ep, req,
1756 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1757 }
1758 ep->stopped = 0;
1759 dev->protocol_stall = 0;
1760 net2272_ep_write(ep, EP_STAT0,
1761 (1 << DATA_IN_TOKEN_INTERRUPT)
1762 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1763 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1764 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1765 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1766 net2272_ep_write(ep, EP_STAT1,
1767 (1 << TIMEOUT)
1768 | (1 << USB_OUT_ACK_SENT)
1769 | (1 << USB_OUT_NAK_SENT)
1770 | (1 << USB_IN_ACK_RCVD)
1771 | (1 << USB_IN_NAK_SENT)
1772 | (1 << USB_STALL_SENT)
1773 | (1 << LOCAL_OUT_ZLP));
1774
1775 /*
1776 * Ensure Control Read pre-validation setting is beyond maximum size
1777 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1778 * an EP0 transfer following the Control Write is a Control Read,
1779 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1780 * pre-validation count.
1781 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
 1782 * the pre-validation count cannot cause an unexpected validation
1783 */
1784 net2272_write(dev, PAGESEL, 0);
1785 net2272_write(dev, EP_TRANSFER2, 0xff);
1786 net2272_write(dev, EP_TRANSFER1, 0xff);
1787 net2272_write(dev, EP_TRANSFER0, 0xff);
1788
1789 u.raw[0] = net2272_read(dev, SETUP0);
1790 u.raw[1] = net2272_read(dev, SETUP1);
1791 u.raw[2] = net2272_read(dev, SETUP2);
1792 u.raw[3] = net2272_read(dev, SETUP3);
1793 u.raw[4] = net2272_read(dev, SETUP4);
1794 u.raw[5] = net2272_read(dev, SETUP5);
1795 u.raw[6] = net2272_read(dev, SETUP6);
1796 u.raw[7] = net2272_read(dev, SETUP7);
1797 /*
1798 * If you have a big endian cpu make sure le16_to_cpus
1799 * performs the proper byte swapping here...
1800 */
1801 le16_to_cpus(&u.r.wValue);
1802 le16_to_cpus(&u.r.wIndex);
1803 le16_to_cpus(&u.r.wLength);
1804
1805 /* ack the irq */
1806 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1807 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1808
1809 /* watch control traffic at the token level, and force
1810 * synchronization before letting the status phase happen.
1811 */
1812 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1813 if (ep->is_in) {
1814 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1815 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1816 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1817 stop_out_naking(ep);
1818 } else
1819 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1820 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1821 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1822 net2272_ep_write(ep, EP_IRQENB, scratch);
1823
1824 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1825 goto delegate;
1826 switch (u.r.bRequest) {
1827 case USB_REQ_GET_STATUS: {
1828 struct net2272_ep *e;
1829 u16 status = 0;
1830
1831 switch (u.r.bRequestType & USB_RECIP_MASK) {
1832 case USB_RECIP_ENDPOINT:
1833 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1834 if (!e || u.r.wLength > 2)
1835 goto do_stall;
1836 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1837 status = __constant_cpu_to_le16(1);
1838 else
1839 status = __constant_cpu_to_le16(0);
1840
1841 /* don't bother with a request object! */
1842 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1843 writew(status, net2272_reg_addr(dev, EP_DATA));
1844 set_fifo_bytecount(&dev->ep[0], 0);
1845 allow_status(ep);
1846 dev_vdbg(dev->dev, "%s stat %02x\n",
1847 ep->ep.name, status);
1848 goto next_endpoints;
1849 case USB_RECIP_DEVICE:
1850 if (u.r.wLength > 2)
1851 goto do_stall;
1852 if (dev->is_selfpowered)
1853 status = (1 << USB_DEVICE_SELF_POWERED);
1854
1855 /* don't bother with a request object! */
1856 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1857 writew(status, net2272_reg_addr(dev, EP_DATA));
1858 set_fifo_bytecount(&dev->ep[0], 0);
1859 allow_status(ep);
1860 dev_vdbg(dev->dev, "device stat %02x\n", status);
1861 goto next_endpoints;
1862 case USB_RECIP_INTERFACE:
1863 if (u.r.wLength > 2)
1864 goto do_stall;
1865
1866 /* don't bother with a request object! */
1867 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1868 writew(status, net2272_reg_addr(dev, EP_DATA));
1869 set_fifo_bytecount(&dev->ep[0], 0);
1870 allow_status(ep);
1871 dev_vdbg(dev->dev, "interface status %02x\n", status);
1872 goto next_endpoints;
1873 }
1874
1875 break;
1876 }
1877 case USB_REQ_CLEAR_FEATURE: {
1878 struct net2272_ep *e;
1879
1880 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1881 goto delegate;
1882 if (u.r.wValue != USB_ENDPOINT_HALT ||
1883 u.r.wLength != 0)
1884 goto do_stall;
1885 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1886 if (!e)
1887 goto do_stall;
1888 if (e->wedged) {
1889 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1890					e->ep.name);
1891 } else {
1892				dev_vdbg(dev->dev, "%s clear halt\n", e->ep.name);
1893 clear_halt(e);
1894 }
1895 allow_status(ep);
1896 goto next_endpoints;
1897 }
1898 case USB_REQ_SET_FEATURE: {
1899 struct net2272_ep *e;
1900
1901 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1902 if (u.r.wIndex != NORMAL_OPERATION)
1903 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1904 allow_status(ep);
1905 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1906 goto next_endpoints;
1907 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1908 goto delegate;
1909 if (u.r.wValue != USB_ENDPOINT_HALT ||
1910 u.r.wLength != 0)
1911 goto do_stall;
1912 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1913 if (!e)
1914 goto do_stall;
1915 set_halt(e);
1916 allow_status(ep);
1917		dev_vdbg(dev->dev, "%s set halt\n", e->ep.name);
1918 goto next_endpoints;
1919 }
1920 case USB_REQ_SET_ADDRESS: {
1921 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1922 allow_status(ep);
1923 break;
1924 }
1925 default:
1926 delegate:
1927 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1928 "ep_cfg %08x\n",
1929 u.r.bRequestType, u.r.bRequest,
1930 u.r.wValue, u.r.wIndex,
1931 net2272_ep_read(ep, EP_CFG));
1932 spin_unlock(&dev->lock);
1933 tmp = dev->driver->setup(&dev->gadget, &u.r);
1934 spin_lock(&dev->lock);
1935 }
1936
1937 /* stall ep0 on error */
1938 if (tmp < 0) {
1939 do_stall:
1940 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1941 u.r.bRequestType, u.r.bRequest, tmp);
1942 dev->protocol_stall = 1;
1943 }
1944 /* endpoint dma irq? */
1945 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1946 net2272_cancel_dma(dev);
1947 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1948 stat &= ~(1 << DMA_DONE_INTERRUPT);
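		/* DMA_ENDPOINT_SELECT set means the completed transfer used
		 * ep-b (dev->ep[2]); clear means ep-a (dev->ep[1]).
		 */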
1949 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1950 ? 2 : 1;
1951
1952 ep = &dev->ep[num];
1953 net2272_handle_dma(ep);
1954 }
1955
1956 next_endpoints:
1957 /* endpoint data irq? */
1958 scratch = stat & 0x0f;
1959 stat &= ~0x0f;
1960 for (num = 0; scratch; num++) {
1961 u8 t;
1962
1963 /* does this endpoint's FIFO and queue need tending? */
1964 t = 1 << num;
1965 if ((scratch & t) == 0)
1966 continue;
1967 scratch ^= t;
1968
1969 ep = &dev->ep[num];
1970 net2272_handle_ep(ep);
1971 }
1972
1973 /* some interrupts we can just ignore */
1974 stat &= ~(1 << SOF_INTERRUPT);
1975
1976 if (stat)
1977 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1978}
1979
1980static void
1981net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1982{
1983 u8 tmp, mask;
1984
1985 /* after disconnect there's nothing else to do! */
1986 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1987 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1988
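	/* A VBUS change or root-port reset was flagged.  Treat it as a
	 * disconnect when the reset arrives with neither speed bit set, or
	 * when VBUS has dropped, provided a connection was previously up.
	 */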
1989 if (stat & tmp) {
1990 net2272_write(dev, IRQSTAT1, tmp);
1991 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
1992 ((net2272_read(dev, USBCTL1) & mask) == 0))
1993 || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
1994 == 0))
1995 && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
1996 dev_dbg(dev->dev, "disconnect %s\n",
1997 dev->driver->driver.name);
1998 stop_activity(dev, dev->driver);
1999 net2272_ep0_start(dev);
2000 return;
2001 }
2002 stat &= ~tmp;
2003
2004 if (!stat)
2005 return;
2006 }
2007
2008 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2009 if (stat & tmp) {
2010 net2272_write(dev, IRQSTAT1, tmp);
2011 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2012 if (dev->driver->suspend)
2013 dev->driver->suspend(&dev->gadget);
2014 if (!enable_suspend) {
2015 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2016 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2017 }
2018 } else {
2019 if (dev->driver->resume)
2020 dev->driver->resume(&dev->gadget);
2021 }
2022 stat &= ~tmp;
2023 }
2024
2025 /* clear any other status/irqs */
2026 if (stat)
2027 net2272_write(dev, IRQSTAT1, stat);
2028
2029 /* some status we can just ignore */
2030 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2031 | (1 << SUSPEND_REQUEST_INTERRUPT)
2032 | (1 << RESUME_INTERRUPT));
2033 if (!stat)
2034 return;
2035 else
2036 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2037}
2038
2039static irqreturn_t net2272_irq(int irq, void *_dev)
2040{
2041 struct net2272 *dev = _dev;
2042#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2043 u32 intcsr;
2044#endif
2045#if defined(PLX_PCI_RDK)
2046 u8 dmareq;
2047#endif
2048 spin_lock(&dev->lock);
2049#if defined(PLX_PCI_RDK)
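	/* On RDK1 the NET2272 interrupt arrives via the PLX 9054 local
	 * interrupt: mask the PCI interrupt while the stat1/stat0 sources
	 * are serviced, then re-enable it.
	 */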
2050 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2051
2052 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2053 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2054 dev->rdk1.plx9054_base_addr + INTCSR);
2055 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2056 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2057 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2058 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2059 dev->rdk1.plx9054_base_addr + INTCSR);
2060 }
2061 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2062 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2063 dev->rdk1.plx9054_base_addr + DMACSR0);
2064
2065 dmareq = net2272_read(dev, DMAREQ);
2066 if (dmareq & 0x01)
2067 net2272_handle_dma(&dev->ep[2]);
2068 else
2069 net2272_handle_dma(&dev->ep[1]);
2070 }
2071#endif
2072#if defined(PLX_PCI_RDK2)
2073 /* see if PCI int for us by checking irqstat */
2074 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
Wei Yongjun000b7f52012-10-22 13:51:11 +08002075	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2076 spin_unlock(&dev->lock);
Seth Levyceb80362011-06-06 19:42:44 -04002077 return IRQ_NONE;
Wei Yongjun000b7f52012-10-22 13:51:11 +08002078 }
Seth Levyceb80362011-06-06 19:42:44 -04002079 /* check dma interrupts */
2080#endif
2081	/* Platform/device interrupt handler */
2082#if !defined(PLX_PCI_RDK)
2083 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2084 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2085#endif
2086 spin_unlock(&dev->lock);
2087
2088 return IRQ_HANDLED;
2089}
2090
2091static int net2272_present(struct net2272 *dev)
2092{
2093 /*
2094 * Quick test to see if CPU can communicate properly with the NET2272.
2095 * Verifies connection using writes and reads to write/read and
2096 * read-only registers.
2097 *
2098	 * This routine is strongly recommended, especially during early bring-up
2099	 * of new hardware; designs that do not run a Power-On Self-Test (POST)
2100	 * may discard it (or perhaps minimize it).
2101 */
2102 unsigned int ii;
2103 u8 val, refval;
2104
2105	/* Verify the NET2272 SCRATCH register can be written and read back */
2106 refval = net2272_read(dev, SCRATCH);
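	/* Walk a spread of test patterns (stepping by 7 samples values across
	 * the full 8-bit range); refval lets the original contents be restored.
	 */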
2107 for (ii = 0; ii < 0x100; ii += 7) {
2108 net2272_write(dev, SCRATCH, ii);
2109 val = net2272_read(dev, SCRATCH);
2110 if (val != ii) {
2111 dev_dbg(dev->dev,
2112 "%s: write/read SCRATCH register test failed: "
2113 "wrote:0x%2.2x, read:0x%2.2x\n",
2114 __func__, ii, val);
2115 return -EINVAL;
2116 }
2117 }
2118 /* To be nice, we write the original SCRATCH value back: */
2119 net2272_write(dev, SCRATCH, refval);
2120
2121 /* Verify NET2272 CHIPREV register is read-only: */
2122 refval = net2272_read(dev, CHIPREV_2272);
2123 for (ii = 0; ii < 0x100; ii += 7) {
2124 net2272_write(dev, CHIPREV_2272, ii);
2125 val = net2272_read(dev, CHIPREV_2272);
2126 if (val != refval) {
2127 dev_dbg(dev->dev,
2128 "%s: write/read CHIPREV register test failed: "
2129 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2130 __func__, ii, val, refval);
2131 return -EINVAL;
2132 }
2133 }
2134
2135 /*
2136 * Verify NET2272's "NET2270 legacy revision" register
2137 * - NET2272 has two revision registers. The NET2270 legacy revision
2138 * register should read the same value, regardless of the NET2272
2139	 *   silicon revision.  The legacy register is provided for NET2270
2140	 *   firmware running on the NET2272.
2141 */
2142 val = net2272_read(dev, CHIPREV_LEGACY);
2143 if (val != NET2270_LEGACY_REV) {
2144 /*
2145 * Unexpected legacy revision value
2146 * - Perhaps the chip is a NET2270?
2147 */
2148 dev_dbg(dev->dev,
2149 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2150 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2151 __func__, NET2270_LEGACY_REV, val);
2152 return -EINVAL;
2153 }
2154
2155 /*
2156 * Verify NET2272 silicon revision
2157 * - This revision register is appropriate for the silicon version
2158 * of the NET2272
2159 */
2160 val = net2272_read(dev, CHIPREV_2272);
2161 switch (val) {
2162 case CHIPREV_NET2272_R1:
2163 /*
2164 * NET2272 Rev 1 has DMA related errata:
2165 * - Newer silicon (Rev 1A or better) required
2166 */
2167 dev_dbg(dev->dev,
2168 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2169 __func__);
2170 break;
2171 case CHIPREV_NET2272_R1A:
2172 break;
2173 default:
2174 /* NET2272 silicon version *may* not work with this firmware */
2175 dev_dbg(dev->dev,
2176 "%s: unexpected silicon revision register value: "
2177 " CHIPREV_2272: 0x%2.2x\n",
2178 __func__, val);
2179 /*
2180 * Return Success, even though the chip rev is not an expected value
2181 * - Older, pre-built firmware can attempt to operate on newer silicon
2182 * - Often, new silicon is perfectly compatible
2183 */
2184 }
2185
2186 /* Success: NET2272 checks out OK */
2187 return 0;
2188}
2189
2190static void
2191net2272_gadget_release(struct device *_dev)
2192{
2193 struct net2272 *dev = dev_get_drvdata(_dev);
2194 kfree(dev);
2195}
2196
2197/*---------------------------------------------------------------------------*/
2198
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05002199static void
Seth Levyceb80362011-06-06 19:42:44 -04002200net2272_remove(struct net2272 *dev)
2201{
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03002202 usb_del_gadget_udc(&dev->gadget);
2203
Seth Levyceb80362011-06-06 19:42:44 -04002204 /* start with the driver above us */
2205 if (dev->driver) {
2206 /* should have been done already by driver model core */
2207		dev_warn(dev->dev, "remove, driver '%s' is still registered\n",
2208 dev->driver->driver.name);
2209 usb_gadget_unregister_driver(dev->driver);
2210 }
2211
2212 free_irq(dev->irq, dev);
2213 iounmap(dev->base_addr);
2214
Seth Levyceb80362011-06-06 19:42:44 -04002215 device_remove_file(dev->dev, &dev_attr_registers);
2216
2217 dev_info(dev->dev, "unbind\n");
Seth Levyceb80362011-06-06 19:42:44 -04002218}
2219
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002220static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
Seth Levyceb80362011-06-06 19:42:44 -04002221{
2222 struct net2272 *ret;
2223
Seth Levyceb80362011-06-06 19:42:44 -04002224 if (!irq) {
2225 dev_dbg(dev, "No IRQ!\n");
2226 return ERR_PTR(-ENODEV);
2227 }
2228
2229 /* alloc, and start init */
2230 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2231 if (!ret)
2232 return ERR_PTR(-ENOMEM);
2233
2234 spin_lock_init(&ret->lock);
2235 ret->irq = irq;
2236 ret->dev = dev;
2237 ret->gadget.ops = &net2272_ops;
Michal Nazarewiczd327ab52011-11-19 18:27:37 +01002238 ret->gadget.max_speed = USB_SPEED_HIGH;
Seth Levyceb80362011-06-06 19:42:44 -04002239
2240 /* the "gadget" abstracts/virtualizes the controller */
Seth Levyceb80362011-06-06 19:42:44 -04002241 ret->gadget.name = driver_name;
2242
2243 return ret;
2244}
2245
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002246static int
Seth Levyceb80362011-06-06 19:42:44 -04002247net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2248{
2249 int ret;
2250
2251	/* See if the NET2272 is actually there... */
2252 if (net2272_present(dev)) {
2253 dev_warn(dev->dev, "2272 not found!\n");
2254 ret = -ENODEV;
2255 goto err;
2256 }
2257
2258 net2272_usb_reset(dev);
2259 net2272_usb_reinit(dev);
2260
2261 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2262 if (ret) {
2263 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2264 goto err;
2265 }
2266
2267 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2268
2269 /* done */
2270 dev_info(dev->dev, "%s\n", driver_desc);
2271 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2272 dev->irq, dev->base_addr, dev->chiprev,
2273 dma_mode_string());
2274 dev_info(dev->dev, "version: %s\n", driver_vers);
2275
Seth Levyceb80362011-06-06 19:42:44 -04002276 ret = device_create_file(dev->dev, &dev_attr_registers);
2277 if (ret)
Felipe Balbic9f9c842013-01-24 16:48:12 +02002278 goto err_irq;
Seth Levyceb80362011-06-06 19:42:44 -04002279
Felipe Balbi8efeeef2013-02-26 15:15:27 +02002280 ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2281 net2272_gadget_release);
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03002282 if (ret)
2283 goto err_add_udc;
2284
Seth Levyceb80362011-06-06 19:42:44 -04002285 return 0;
2286
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03002287err_add_udc:
2288 device_remove_file(dev->dev, &dev_attr_registers);
Seth Levyceb80362011-06-06 19:42:44 -04002289 err_irq:
2290 free_irq(dev->irq, dev);
2291 err:
2292 return ret;
2293}
2294
2295#ifdef CONFIG_PCI
2296
2297/*
2298 * wrap this driver around the specified device, but
2299 * don't respond over USB until a gadget driver binds to us
2300 */
2301
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002302static int
Seth Levyceb80362011-06-06 19:42:44 -04002303net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2304{
2305 unsigned long resource, len, tmp;
2306 void __iomem *mem_mapped_addr[4];
2307 int ret, i;
2308
2309 /*
2310 * BAR 0 holds PLX 9054 config registers
2311 * BAR 1 is i/o memory; unused here
2312 * BAR 2 holds EPLD config registers
2313 * BAR 3 holds NET2272 registers
2314 */
2315
2316 /* Find and map all address spaces */
2317 for (i = 0; i < 4; ++i) {
2318 if (i == 1)
2319 continue; /* BAR1 unused */
2320
2321 resource = pci_resource_start(pdev, i);
2322 len = pci_resource_len(pdev, i);
2323
2324 if (!request_mem_region(resource, len, driver_name)) {
2325 dev_dbg(dev->dev, "controller already in use\n");
2326 ret = -EBUSY;
2327 goto err;
2328 }
2329
2330 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2331 if (mem_mapped_addr[i] == NULL) {
2332 release_mem_region(resource, len);
2333 dev_dbg(dev->dev, "can't map memory\n");
2334 ret = -EFAULT;
2335 goto err;
2336 }
2337 }
2338
2339 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2340 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2341 dev->base_addr = mem_mapped_addr[3];
2342
2343 /* Set PLX 9054 bus width (16 bits) */
2344 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2345 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2346 dev->rdk1.plx9054_base_addr + LBRD1);
2347
2348 /* Enable PLX 9054 Interrupts */
2349 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2350 (1 << PCI_INTERRUPT_ENABLE) |
2351 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2352 dev->rdk1.plx9054_base_addr + INTCSR);
2353
2354 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2355 dev->rdk1.plx9054_base_addr + DMACSR0);
2356
2357	/* configure the EPLD and assert NET2272 reset (released below) */
2358 writeb((1 << EPLD_DMA_ENABLE) |
2359 (1 << DMA_CTL_DACK) |
2360 (1 << DMA_TIMEOUT_ENABLE) |
2361 (1 << USER) |
2362 (0 << MPX_MODE) |
2363 (1 << BUSWIDTH) |
2364 (1 << NET2272_RESET),
2365 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2366
2367 mb();
2368 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2369 ~(1 << NET2272_RESET),
2370 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2371 udelay(200);
2372
2373 return 0;
2374
2375 err:
2376 while (--i >= 0) {
2377 iounmap(mem_mapped_addr[i]);
2378 release_mem_region(pci_resource_start(pdev, i),
2379 pci_resource_len(pdev, i));
2380 }
2381
2382 return ret;
2383}
2384
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002385static int
Seth Levyceb80362011-06-06 19:42:44 -04002386net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2387{
2388 unsigned long resource, len;
2389 void __iomem *mem_mapped_addr[2];
2390 int ret, i;
2391
2392 /*
2393	 * BAR 0 holds FPGA config registers
2394 * BAR 1 holds NET2272 registers
2395 */
2396
2397 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2398 for (i = 0; i < 2; ++i) {
2399 resource = pci_resource_start(pdev, i);
2400 len = pci_resource_len(pdev, i);
2401
2402 if (!request_mem_region(resource, len, driver_name)) {
2403 dev_dbg(dev->dev, "controller already in use\n");
2404 ret = -EBUSY;
2405 goto err;
2406 }
2407
2408 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2409 if (mem_mapped_addr[i] == NULL) {
2410 release_mem_region(resource, len);
2411 dev_dbg(dev->dev, "can't map memory\n");
2412 ret = -EFAULT;
2413 goto err;
2414 }
2415 }
2416
2417 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2418 dev->base_addr = mem_mapped_addr[1];
2419
2420 mb();
2421 /* Set 2272 bus width (16 bits) and reset */
2422 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2423 udelay(200);
2424 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2425 /* Print fpga version number */
2426 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2427 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2428 /* Enable FPGA Interrupts */
2429 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2430
2431 return 0;
2432
2433 err:
2434 while (--i >= 0) {
2435 iounmap(mem_mapped_addr[i]);
2436 release_mem_region(pci_resource_start(pdev, i),
2437 pci_resource_len(pdev, i));
2438 }
2439
2440 return ret;
2441}
2442
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002443static int
Seth Levyceb80362011-06-06 19:42:44 -04002444net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2445{
2446 struct net2272 *dev;
2447 int ret;
2448
2449 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2450 if (IS_ERR(dev))
2451 return PTR_ERR(dev);
2452 dev->dev_id = pdev->device;
2453
2454 if (pci_enable_device(pdev) < 0) {
2455 ret = -ENODEV;
2456 goto err_free;
2457 }
2458
2459 pci_set_master(pdev);
2460
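	/* pci_ids below matches only the two RDK cards, so any other device
	 * id reaching this switch is a driver bug.
	 */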
2461 switch (pdev->device) {
2462 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2463 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2464 default: BUG();
2465 }
2466 if (ret)
2467 goto err_pci;
2468
2469 ret = net2272_probe_fin(dev, 0);
2470 if (ret)
2471 goto err_pci;
2472
2473 pci_set_drvdata(pdev, dev);
2474
2475 return 0;
2476
2477 err_pci:
2478 pci_disable_device(pdev);
2479 err_free:
2480 kfree(dev);
2481
2482 return ret;
2483}
2484
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05002485static void
Seth Levyceb80362011-06-06 19:42:44 -04002486net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2487{
2488 int i;
2489
2490 /* disable PLX 9054 interrupts */
2491 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2492 ~(1 << PCI_INTERRUPT_ENABLE),
2493 dev->rdk1.plx9054_base_addr + INTCSR);
2494
2495 /* clean up resources allocated during probe() */
2496 iounmap(dev->rdk1.plx9054_base_addr);
2497 iounmap(dev->rdk1.epld_base_addr);
2498
2499 for (i = 0; i < 4; ++i) {
2500 if (i == 1)
2501 continue; /* BAR1 unused */
2502 release_mem_region(pci_resource_start(pdev, i),
2503 pci_resource_len(pdev, i));
2504 }
2505}
2506
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05002507static void
Seth Levyceb80362011-06-06 19:42:44 -04002508net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2509{
2510 int i;
2511
2512 /* disable fpga interrupts
2513 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2514 ~(1 << PCI_INTERRUPT_ENABLE),
2515 dev->rdk1.plx9054_base_addr + INTCSR);
2516 */
2517
2518 /* clean up resources allocated during probe() */
2519 iounmap(dev->rdk2.fpga_base_addr);
2520
2521 for (i = 0; i < 2; ++i)
2522 release_mem_region(pci_resource_start(pdev, i),
2523 pci_resource_len(pdev, i));
2524}
2525
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05002526static void
Seth Levyceb80362011-06-06 19:42:44 -04002527net2272_pci_remove(struct pci_dev *pdev)
2528{
2529 struct net2272 *dev = pci_get_drvdata(pdev);
2530
2531 net2272_remove(dev);
2532
2533 switch (pdev->device) {
2534 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2535 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2536 default: BUG();
2537 }
2538
2539 pci_disable_device(pdev);
2540
2541 kfree(dev);
2542}
2543
2544/* Table of matching PCI IDs */
Bill Pembertond3608b62012-11-19 13:24:34 -05002545static struct pci_device_id pci_ids[] = {
Seth Levyceb80362011-06-06 19:42:44 -04002546 { /* RDK 1 card */
2547 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2548 .class_mask = 0,
2549 .vendor = PCI_VENDOR_ID_PLX,
2550 .device = PCI_DEVICE_ID_RDK1,
2551 .subvendor = PCI_ANY_ID,
2552 .subdevice = PCI_ANY_ID,
2553 },
2554 { /* RDK 2 card */
2555 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2556 .class_mask = 0,
2557 .vendor = PCI_VENDOR_ID_PLX,
2558 .device = PCI_DEVICE_ID_RDK2,
2559 .subvendor = PCI_ANY_ID,
2560 .subdevice = PCI_ANY_ID,
2561 },
2562 { }
2563};
2564MODULE_DEVICE_TABLE(pci, pci_ids);
2565
2566static struct pci_driver net2272_pci_driver = {
2567 .name = driver_name,
2568 .id_table = pci_ids,
2569
2570 .probe = net2272_pci_probe,
Bill Pemberton76904172012-11-19 13:21:08 -05002571 .remove = net2272_pci_remove,
Seth Levyceb80362011-06-06 19:42:44 -04002572};
2573
Sebastian Andrzej Siewiore4fe0562011-06-29 16:41:54 +03002574static int net2272_pci_register(void)
2575{
2576 return pci_register_driver(&net2272_pci_driver);
2577}
2578
2579static void net2272_pci_unregister(void)
2580{
2581 pci_unregister_driver(&net2272_pci_driver);
2582}
2583
Seth Levyceb80362011-06-06 19:42:44 -04002584#else
Sebastian Andrzej Siewiore4fe0562011-06-29 16:41:54 +03002585static inline int net2272_pci_register(void) { return 0; }
2586static inline void net2272_pci_unregister(void) { }
Seth Levyceb80362011-06-06 19:42:44 -04002587#endif
2588
2589/*---------------------------------------------------------------------------*/
2590
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002591static int
Seth Levyceb80362011-06-06 19:42:44 -04002592net2272_plat_probe(struct platform_device *pdev)
2593{
2594 struct net2272 *dev;
2595 int ret;
2596 unsigned int irqflags;
2597 resource_size_t base, len;
2598 struct resource *iomem, *iomem_bus, *irq_res;
2599
2600 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2601 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2602 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2603 if (!irq_res || !iomem) {
2604		dev_err(&pdev->dev, "must provide irq/base addr\n");
2605 return -EINVAL;
2606 }
2607
2608 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2609 if (IS_ERR(dev))
2610 return PTR_ERR(dev);
2611
2612 irqflags = 0;
2613 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2614 irqflags |= IRQF_TRIGGER_RISING;
2615 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2616 irqflags |= IRQF_TRIGGER_FALLING;
2617 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2618 irqflags |= IRQF_TRIGGER_HIGH;
2619 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2620 irqflags |= IRQF_TRIGGER_LOW;
2621
2622 base = iomem->start;
2623 len = resource_size(iomem);
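	/* An optional IORESOURCE_BUS resource supplies base_shift, applied to
	 * register offsets by the accessors in net2272.h.
	 */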
2624 if (iomem_bus)
2625 dev->base_shift = iomem_bus->start;
2626
2627 if (!request_mem_region(base, len, driver_name)) {
2628		dev_dbg(dev->dev, "can't request memory region\n");
2629 ret = -EBUSY;
2630 goto err;
2631 }
2632 dev->base_addr = ioremap_nocache(base, len);
2633 if (!dev->base_addr) {
2634 dev_dbg(dev->dev, "can't map memory\n");
2635 ret = -EFAULT;
2636 goto err_req;
2637 }
2638
2639	ret = net2272_probe_fin(dev, irqflags);
2640 if (ret)
2641 goto err_io;
2642
2643 platform_set_drvdata(pdev, dev);
2644 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2645 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2646
Seth Levyceb80362011-06-06 19:42:44 -04002647 return 0;
2648
2649 err_io:
2650 iounmap(dev->base_addr);
2651 err_req:
2652 release_mem_region(base, len);
2653 err:
2654 return ret;
2655}
2656
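/*
 * Board-level usage sketch (illustrative only, not part of this driver):
 * net2272_plat_probe() above expects a memory window and an IRQ resource,
 * with the IRQ trigger encoded in the resource flags.  A board file could
 * register them roughly as below; the address, IRQ number and trigger
 * flag are hypothetical.
 *
 *	static struct resource board_net2272_resources[] = {
 *		[0] = {
 *			.start	= 0x20300000,
 *			.end	= 0x203000ff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[1] = {
 *			.start	= 64,
 *			.end	= 64,
 *			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 *		},
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name		= "net2272",
 *		.id		= -1,
 *		.resource	= board_net2272_resources,
 *		.num_resources	= ARRAY_SIZE(board_net2272_resources),
 *	};
 *
 *	platform_device_register(&board_net2272_device);
 *
 * An IORESOURCE_BUS entry may be added as a third resource to set
 * base_shift (see net2272_plat_probe() above).
 */
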
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05002657static int
Seth Levyceb80362011-06-06 19:42:44 -04002658net2272_plat_remove(struct platform_device *pdev)
2659{
2660 struct net2272 *dev = platform_get_drvdata(pdev);
2661
2662 net2272_remove(dev);
2663
2664 release_mem_region(pdev->resource[0].start,
2665 resource_size(&pdev->resource[0]));
2666
2667 kfree(dev);
2668
2669 return 0;
2670}
2671
2672static struct platform_driver net2272_plat_driver = {
2673 .probe = net2272_plat_probe,
Bill Pemberton76904172012-11-19 13:21:08 -05002674 .remove = net2272_plat_remove,
Seth Levyceb80362011-06-06 19:42:44 -04002675 .driver = {
2676 .name = driver_name,
2677 .owner = THIS_MODULE,
2678 },
2679 /* FIXME .suspend, .resume */
2680};
Sebastian Andrzej Siewior86081d72011-06-29 16:41:55 +03002681MODULE_ALIAS("platform:net2272");
Seth Levyceb80362011-06-06 19:42:44 -04002682
2683static int __init net2272_init(void)
2684{
Sebastian Andrzej Siewiore4fe0562011-06-29 16:41:54 +03002685 int ret;
2686
2687 ret = net2272_pci_register();
2688 if (ret)
2689 return ret;
2690 ret = platform_driver_register(&net2272_plat_driver);
2691 if (ret)
2692 goto err_pci;
2693 return ret;
2694
2695err_pci:
2696 net2272_pci_unregister();
2697 return ret;
Seth Levyceb80362011-06-06 19:42:44 -04002698}
2699module_init(net2272_init);
2700
2701static void __exit net2272_cleanup(void)
2702{
Sebastian Andrzej Siewiore4fe0562011-06-29 16:41:54 +03002703 net2272_pci_unregister();
Seth Levyceb80362011-06-06 19:42:44 -04002704 platform_driver_unregister(&net2272_plat_driver);
2705}
2706module_exit(net2272_cleanup);
2707
2708MODULE_DESCRIPTION(DRIVER_DESC);
2709MODULE_AUTHOR("PLX Technology, Inc.");
2710MODULE_LICENSE("GPL");