/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME         "bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";
static const char *const bcm63xx_ep_name[] = {
        bcm63xx_ep0name,
        "ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
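
/*
 * Example (editorial note, not in the original source): both knobs are
 * ordinary module parameters, so a full-speed-only, coalesced setup could
 * be requested at load time with:
 *
 *      modprobe bcm63xx_udc use_fullspeed=1 irq_coalesce=1
 *
 * Both are S_IRUGO, so the active values can be read back from
 * /sys/module/bcm63xx_udc/parameters/ but not changed at runtime.
 */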

#define BCM63XX_NUM_EP          5
#define BCM63XX_NUM_IUDMA       6
#define BCM63XX_NUM_FIFO_PAIRS  3

#define IUDMA_RESET_TIMEOUT_US  10000

#define IUDMA_EP0_RXCHAN        0
#define IUDMA_EP0_TXCHAN        1

#define IUDMA_MAX_FRAGMENT      2048
#define BCM63XX_MAX_CTRL_PKT    64

#define BCMEP_CTRL              0x00
#define BCMEP_ISOC              0x01
#define BCMEP_BULK              0x02
#define BCMEP_INTR              0x03

#define BCMEP_OUT               0x00
#define BCMEP_IN                0x01

#define BCM63XX_SPD_FULL        1
#define BCM63XX_SPD_HIGH        0

#define IUDMA_DMAC_OFFSET       0x200
#define IUDMA_DMAS_OFFSET       0x400

enum bcm63xx_ep0_state {
        EP0_REQUEUE,
        EP0_IDLE,
        EP0_IN_DATA_PHASE_SETUP,
        EP0_IN_DATA_PHASE_COMPLETE,
        EP0_OUT_DATA_PHASE_SETUP,
        EP0_OUT_DATA_PHASE_COMPLETE,
        EP0_OUT_STATUS_PHASE,
        EP0_IN_FAKE_STATUS_PHASE,
        EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
        "REQUEUE",
        "IDLE",
        "IN_DATA_PHASE_SETUP",
        "IN_DATA_PHASE_COMPLETE",
        "OUT_DATA_PHASE_SETUP",
        "OUT_DATA_PHASE_COMPLETE",
        "OUT_STATUS_PHASE",
        "IN_FAKE_STATUS_PHASE",
        "SHUTDOWN",
};
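
/*
 * Illustrative state walks (editorial note, derived from
 * bcm63xx_ep0_one_round() below):
 *
 *   IN data phase (e.g. GET_DESCRIPTOR):
 *      EP0_REQUEUE -> EP0_IDLE -> EP0_IN_DATA_PHASE_SETUP ->
 *      EP0_IN_DATA_PHASE_COMPLETE -> EP0_REQUEUE
 *
 *   OUT data phase (host-to-device control write):
 *      EP0_REQUEUE -> EP0_IDLE -> EP0_OUT_DATA_PHASE_SETUP ->
 *      EP0_OUT_DATA_PHASE_COMPLETE -> EP0_OUT_STATUS_PHASE -> EP0_REQUEUE
 *
 *   Spoofed SET_CONFIGURATION / SET_INTERFACE:
 *      EP0_IDLE -> EP0_IN_FAKE_STATUS_PHASE -> EP0_IDLE
 */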

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
        int ep_num;
        int n_bds;
        int ep_type;
        int dir;
        int n_fifo_slots;
        int max_pkt_hs;
        int max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

        /* This controller was designed to support a CDC/RNDIS application.
           It may be possible to reconfigure some of the endpoints, but
           the hardware limitations (FIFO sizing and number of DMA channels)
           may significantly impact flexibility and/or stability.  Change
           these values at your own risk.

              ep_num       ep_type           n_fifo_slots    max_pkt_fs
           idx      |  n_bds     |  dir          |  max_pkt_hs  |
            |       |    |       |    |          |       |      |       */
        [0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
        [1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
        [2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
        [3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
        [4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
        [5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
        unsigned int ch_idx;
        int ep_num;
        bool enabled;
        int max_pkt;
        bool is_tx;
        struct bcm63xx_ep *bep;
        struct bcm63xx_udc *udc;

        struct bcm_enet_desc *read_bd;
        struct bcm_enet_desc *write_bd;
        struct bcm_enet_desc *end_bd;
        int n_bds_used;

        struct bcm_enet_desc *bd_ring;
        dma_addr_t bd_ring_dma;
        unsigned int n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
        unsigned int ep_num;
        struct iudma_ch *iudma;
        struct usb_ep ep;
        struct bcm63xx_udc *udc;
        struct list_head queue;
        unsigned halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
        struct list_head queue;         /* ep's requests */
        struct usb_request req;
        unsigned int offset;
        unsigned int bd_bytes;
        struct iudma_ch *iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
        spinlock_t lock;

        struct device *dev;
        struct bcm63xx_usbd_platform_data *pd;
        struct clk *usbd_clk;
        struct clk *usbh_clk;

        struct usb_gadget gadget;
        struct usb_gadget_driver *driver;

        void __iomem *usbd_regs;
        void __iomem *iudma_regs;

        struct bcm63xx_ep bep[BCM63XX_NUM_EP];
        struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

        int cfg;
        int iface;
        int alt_iface;

        struct bcm63xx_req ep0_ctrl_req;
        u8 *ep0_ctrl_buf;

        int ep0state;
        struct work_struct ep0_wq;

        unsigned long wedgemap;

        unsigned ep0_req_reset:1;
        unsigned ep0_req_set_cfg:1;
        unsigned ep0_req_set_iface:1;
        unsigned ep0_req_shutdown:1;

        unsigned ep0_req_completed:1;
        struct usb_request *ep0_reply;
        struct usb_request *ep0_request;

        struct dentry *debugfs_root;
        struct dentry *debugfs_usbd;
        struct dentry *debugfs_iudma;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
        return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
        return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
        return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
        return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
        bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
        return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
        bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
{
        return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
        bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
{
        return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
        bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
        if (is_enabled) {
                clk_enable(udc->usbh_clk);
                clk_enable(udc->usbd_clk);
                udelay(10);
        } else {
                clk_disable(udc->usbd_clk);
                clk_disable(udc->usbh_clk);
        }
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
        u32 val = usbd_readl(udc, USBD_CONTROL_REG);

        val &= ~USBD_CONTROL_INIT_SEL_MASK;
        val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
        usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
        bool is_stalled)
{
        u32 val;

        val = USBD_STALL_UPDATE_MASK |
                (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
                (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
        usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
        int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
        u32 i, val, rx_fifo_slot, tx_fifo_slot;

        /* set up FIFO boundaries and packet sizes; this is done in pairs */
        rx_fifo_slot = tx_fifo_slot = 0;
        for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
                const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
                const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

                bcm63xx_ep_dma_select(udc, i >> 1);

                val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
                        ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
                         USBD_RXFIFO_CONFIG_END_SHIFT);
                rx_fifo_slot += rx_cfg->n_fifo_slots;
                usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
                usbd_writel(udc,
                            is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
                            USBD_RXFIFO_EPSIZE_REG);

                val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
                        ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
                         USBD_TXFIFO_CONFIG_END_SHIFT);
                tx_fifo_slot += tx_cfg->n_fifo_slots;
                usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
                usbd_writel(udc,
                            is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
                            USBD_TXFIFO_EPSIZE_REG);

                usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
        }
}
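
/*
 * Worked example (editorial note, derived from iudma_defaults[] above):
 * with the default n_fifo_slots of 32/128/32 per direction, the three
 * iterations of the loop in bcm63xx_fifo_setup() assign RX FIFO slots
 * 0-31 to the ep0 pair, 32-159 to the bulk pair, and 160-191 to the
 * interrupt pair; the TX FIFO is carved up identically from its own
 * slot counter.
 */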

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
        u32 val;

        bcm63xx_ep_dma_select(udc, ep_num);

        val = usbd_readl(udc, USBD_CONTROL_REG);
        val |= USBD_CONTROL_FIFO_RESET_MASK;
        usbd_writel(udc, val, USBD_CONTROL_REG);
        usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
        int i;

        for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
                bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
        u32 i, val;

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
                const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

                if (cfg->ep_num < 0)
                        continue;

                bcm63xx_ep_dma_select(udc, cfg->ep_num);
                val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
                        ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
                usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
        }
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
        u32 val, i;

        usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
                const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
                int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
                              cfg->max_pkt_hs : cfg->max_pkt_fs;
                int idx = cfg->ep_num;

                udc->iudma[i].max_pkt = max_pkt;

                if (idx < 0)
                        continue;
                usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

                val = (idx << USBD_CSR_EP_LOG_SHIFT) |
                      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
                      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
                      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
                      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
                      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
                      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
                usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
        }
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
        struct bcm63xx_req *breq)
{
        int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
        unsigned int bytes_left = breq->req.length - breq->offset;
        const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
                iudma->max_pkt : IUDMA_MAX_FRAGMENT;

        iudma->n_bds_used = 0;
        breq->bd_bytes = 0;
        breq->iudma = iudma;

        if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
                extra_zero_pkt = 1;

        do {
                struct bcm_enet_desc *d = iudma->write_bd;
                u32 dmaflags = 0;
                unsigned int n_bytes;

                if (d == iudma->end_bd) {
                        dmaflags |= DMADESC_WRAP_MASK;
                        iudma->write_bd = iudma->bd_ring;
                } else {
                        iudma->write_bd++;
                }
                iudma->n_bds_used++;

                n_bytes = min_t(int, bytes_left, max_bd_bytes);
                if (n_bytes)
                        dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
                else
                        dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
                                    DMADESC_USB_ZERO_MASK;

                dmaflags |= DMADESC_OWNER_MASK;
                if (first_bd) {
                        dmaflags |= DMADESC_SOP_MASK;
                        first_bd = 0;
                }

                /*
                 * extra_zero_pkt forces one more iteration through the loop
                 * after all data is queued up, to send the zero packet
                 */
                if (extra_zero_pkt && !bytes_left)
                        extra_zero_pkt = 0;

                if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
                    (n_bytes == bytes_left && !extra_zero_pkt)) {
                        last_bd = 1;
                        dmaflags |= DMADESC_EOP_MASK;
                }

                d->address = breq->req.dma + breq->offset;
                mb();
                d->len_stat = dmaflags;

                breq->offset += n_bytes;
                breq->bd_bytes += n_bytes;
                bytes_left -= n_bytes;
        } while (!last_bd);

        usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
                        ENETDMAC_CHANCFG_REG(iudma->ch_idx));
}
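
/*
 * Worked example (editorial note, not in the original source): a
 * 3000-byte TX request is queued here as two BDs of 2048 and 952 bytes
 * (IUDMA_MAX_FRAGMENT = 2048), with SOP on the first and EOP on the
 * second.  If the length is an exact multiple of max_pkt and req.zero
 * is set, one extra BD carrying DMADESC_USB_ZERO_MASK is appended to
 * close the transfer with a zero-length packet.  On RX with
 * irq_coalesce disabled, max_bd_bytes is max_pkt and the !is_tx test
 * makes every call queue exactly one BD.
 */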

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
        int i, actual_len = 0;
        struct bcm_enet_desc *d = iudma->read_bd;

        if (!iudma->n_bds_used)
                return -EINVAL;

        for (i = 0; i < iudma->n_bds_used; i++) {
                u32 dmaflags;

                dmaflags = d->len_stat;

                if (dmaflags & DMADESC_OWNER_MASK)
                        return -EBUSY;

                actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
                              DMADESC_LENGTH_SHIFT;
                if (d == iudma->end_bd)
                        d = iudma->bd_ring;
                else
                        d++;
        }

        iudma->read_bd = d;
        iudma->n_bds_used = 0;
        return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
        int timeout = IUDMA_RESET_TIMEOUT_US;
        struct bcm_enet_desc *d;
        int ch_idx = iudma->ch_idx;

        if (!iudma->is_tx)
                bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

        /* stop DMA, then wait for the hardware to wrap up */
        usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));

        while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
               ENETDMAC_CHANCFG_EN_MASK) {
                udelay(1);

                /* repeatedly flush the FIFO data until the BD completes */
                if (iudma->is_tx && iudma->ep_num >= 0)
                        bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

                if (!timeout--) {
                        dev_err(udc->dev, "can't reset IUDMA channel %d\n",
                                ch_idx);
                        break;
                }
                if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
                        dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
                                 ch_idx);
                        usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
                                        ENETDMAC_CHANCFG_REG(ch_idx));
                }
        }
        usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));

        /* don't leave "live" HW-owned entries for the next guy to step on */
        for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
                d->len_stat = 0;
        mb();

        iudma->read_bd = iudma->write_bd = iudma->bd_ring;
        iudma->n_bds_used = 0;

        /* set up IRQs, UBUS burst size, and BD base for this channel */
        usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
                        ENETDMAC_IRMASK_REG(ch_idx));
        usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));

        usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
        usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
        struct iudma_ch *iudma = &udc->iudma[ch_idx];
        const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
        unsigned int n_bds = cfg->n_bds;
        struct bcm63xx_ep *bep = NULL;

        iudma->ep_num = cfg->ep_num;
        iudma->ch_idx = ch_idx;
        iudma->is_tx = !!(ch_idx & 0x01);
        if (iudma->ep_num >= 0) {
                bep = &udc->bep[iudma->ep_num];
                bep->iudma = iudma;
                INIT_LIST_HEAD(&bep->queue);
        }

        iudma->bep = bep;
        iudma->udc = udc;

        /* ep0 is always active; others are controlled by the gadget driver */
        if (iudma->ep_num <= 0)
                iudma->enabled = true;

        iudma->n_bds = n_bds;
        iudma->bd_ring = dmam_alloc_coherent(udc->dev,
                n_bds * sizeof(struct bcm_enet_desc),
                &iudma->bd_ring_dma, GFP_KERNEL);
        if (!iudma->bd_ring)
                return -ENOMEM;
        iudma->end_bd = &iudma->bd_ring[n_bds - 1];

        return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
        int i, rc;

        usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
                rc = iudma_init_channel(udc, i);
                if (rc)
                        return rc;
                iudma_reset_channel(udc, &udc->iudma[i]);
        }

        usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA) - 1, ENETDMA_GLB_IRQMASK_REG);
        return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
        int i;

        usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

        for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
                iudma_reset_channel(udc, &udc->iudma[i]);

        usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
        u32 val;

        usbd_writel(udc, 0, USBD_STATUS_REG);

        val = BIT(USBD_EVENT_IRQ_USB_RESET) |
              BIT(USBD_EVENT_IRQ_SETUP) |
              BIT(USBD_EVENT_IRQ_SETCFG) |
              BIT(USBD_EVENT_IRQ_SETINTF) |
              BIT(USBD_EVENT_IRQ_USB_LINK);
        usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
        usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
        u32 val, portmask = BIT(udc->pd->port_no);

        if (BCMCPU_IS_6328()) {
                /* configure pinmux to sense VBUS signal */
                val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
                val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
                val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
                        GPIO_PINMUX_OTHR_6328_USB_HOST;
                bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
        }

        val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
        if (is_device) {
                val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
                val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        } else {
                val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
                val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        }
        bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

        val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
        if (is_device)
                val |= USBH_PRIV_SWAP_USBD_MASK;
        else
                val &= ~USBH_PRIV_SWAP_USBD_MASK;
        bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
        u32 val, portmask = BIT(udc->pd->port_no);

        val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
        if (is_on)
                val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        else
                val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
        bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
        set_clocks(udc, true);
        iudma_uninit(udc);
        set_clocks(udc, false);

        clk_put(udc->usbd_clk);
        clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
        int i, rc = 0;
        u32 val;

        udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
                                         GFP_KERNEL);
        if (!udc->ep0_ctrl_buf)
                return -ENOMEM;

        INIT_LIST_HEAD(&udc->gadget.ep_list);
        for (i = 0; i < BCM63XX_NUM_EP; i++) {
                struct bcm63xx_ep *bep = &udc->bep[i];

                bep->ep.name = bcm63xx_ep_name[i];
                bep->ep_num = i;
                bep->ep.ops = &bcm63xx_udc_ep_ops;
                list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
                bep->halted = 0;
                usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
                bep->udc = udc;
                bep->ep.desc = NULL;
                INIT_LIST_HEAD(&bep->queue);
        }

        udc->gadget.ep0 = &udc->bep[0].ep;
        list_del(&udc->bep[0].ep.ep_list);

        udc->gadget.speed = USB_SPEED_UNKNOWN;
        udc->ep0state = EP0_SHUTDOWN;

        udc->usbh_clk = clk_get(udc->dev, "usbh");
        if (IS_ERR(udc->usbh_clk))
                return -EIO;

        udc->usbd_clk = clk_get(udc->dev, "usbd");
        if (IS_ERR(udc->usbd_clk)) {
                clk_put(udc->usbh_clk);
                return -EIO;
        }

        set_clocks(udc, true);

        val = USBD_CONTROL_AUTO_CSRS_MASK |
              USBD_CONTROL_DONE_CSRS_MASK |
              (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
        usbd_writel(udc, val, USBD_CONTROL_REG);

        val = USBD_STRAPS_APP_SELF_PWR_MASK |
              USBD_STRAPS_APP_RAM_IF_MASK |
              USBD_STRAPS_APP_CSRPRGSUP_MASK |
              USBD_STRAPS_APP_8BITPHY_MASK |
              USBD_STRAPS_APP_RMTWKUP_MASK;

        if (udc->gadget.max_speed == USB_SPEED_HIGH)
                val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
        else
                val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
        usbd_writel(udc, val, USBD_STRAPS_REG);

        bcm63xx_set_ctrl_irqs(udc, false);

        usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

        val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
              USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
        usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

        rc = iudma_init(udc);
        set_clocks(udc, false);
        if (rc)
                bcm63xx_uninit_udc_hw(udc);

        return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
        const struct usb_endpoint_descriptor *desc)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct iudma_ch *iudma = bep->iudma;
        unsigned long flags;

        if (!ep || !desc || ep->name == bcm63xx_ep0name)
                return -EINVAL;

        if (!udc->driver)
                return -ESHUTDOWN;

        spin_lock_irqsave(&udc->lock, flags);
        if (iudma->enabled) {
                spin_unlock_irqrestore(&udc->lock, flags);
                return -EINVAL;
        }

        iudma->enabled = true;
        BUG_ON(!list_empty(&bep->queue));

        iudma_reset_channel(udc, iudma);

        bep->halted = 0;
        bcm63xx_set_stall(udc, bep, false);
        clear_bit(bep->ep_num, &udc->wedgemap);

        ep->desc = desc;
        ep->maxpacket = usb_endpoint_maxp(desc);

        spin_unlock_irqrestore(&udc->lock, flags);
        return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct iudma_ch *iudma = bep->iudma;
        struct list_head *pos, *n;
        unsigned long flags;

        if (!ep || !ep->desc)
                return -EINVAL;

        spin_lock_irqsave(&udc->lock, flags);
        if (!iudma->enabled) {
                spin_unlock_irqrestore(&udc->lock, flags);
                return -EINVAL;
        }
        iudma->enabled = false;

        iudma_reset_channel(udc, iudma);

        if (!list_empty(&bep->queue)) {
                list_for_each_safe(pos, n, &bep->queue) {
                        struct bcm63xx_req *breq =
                                list_entry(pos, struct bcm63xx_req, queue);

                        usb_gadget_unmap_request(&udc->gadget, &breq->req,
                                                 iudma->is_tx);
                        list_del(&breq->queue);
                        breq->req.status = -ESHUTDOWN;

                        spin_unlock_irqrestore(&udc->lock, flags);
                        breq->req.complete(&iudma->bep->ep, &breq->req);
                        spin_lock_irqsave(&udc->lock, flags);
                }
        }
        ep->desc = NULL;

        spin_unlock_irqrestore(&udc->lock, flags);
        return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
        gfp_t mem_flags)
{
        struct bcm63xx_req *breq;

        breq = kzalloc(sizeof(*breq), mem_flags);
        if (!breq)
                return NULL;
        return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
        struct usb_request *req)
{
        struct bcm63xx_req *breq = our_req(req);
        kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
        gfp_t mem_flags)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct bcm63xx_req *breq = our_req(req);
        unsigned long flags;
        int rc = 0;

        if (unlikely(!req || !req->complete || !req->buf || !ep))
                return -EINVAL;

        req->actual = 0;
        req->status = 0;
        breq->offset = 0;

        if (bep == &udc->bep[0]) {
                /* only one reply per request, please */
                if (udc->ep0_reply)
                        return -EINVAL;

                udc->ep0_reply = req;
                schedule_work(&udc->ep0_wq);
                return 0;
        }

        spin_lock_irqsave(&udc->lock, flags);
        if (!bep->iudma->enabled) {
                rc = -ESHUTDOWN;
                goto out;
        }

        rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
        if (rc == 0) {
                list_add_tail(&breq->queue, &bep->queue);
                if (list_is_singular(&bep->queue))
                        iudma_write(udc, bep->iudma, breq);
        }

out:
        spin_unlock_irqrestore(&udc->lock, flags);
        return rc;
}

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        struct bcm63xx_req *breq = our_req(req), *cur;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&udc->lock, flags);
        if (list_empty(&bep->queue)) {
                rc = -EINVAL;
                goto out;
        }

        cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
        usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

        if (breq == cur) {
                iudma_reset_channel(udc, bep->iudma);
                list_del(&breq->queue);

                if (!list_empty(&bep->queue)) {
                        struct bcm63xx_req *next;

                        next = list_first_entry(&bep->queue,
                                struct bcm63xx_req, queue);
                        iudma_write(udc, bep->iudma, next);
                }
        } else {
                list_del(&breq->queue);
        }

out:
        spin_unlock_irqrestore(&udc->lock, flags);

        req->status = -ESHUTDOWN;
        req->complete(ep, req);

        return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        unsigned long flags;

        spin_lock_irqsave(&udc->lock, flags);
        bcm63xx_set_stall(udc, bep, !!value);
        bep->halted = value;
        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
        struct bcm63xx_ep *bep = our_ep(ep);
        struct bcm63xx_udc *udc = bep->udc;
        unsigned long flags;

        spin_lock_irqsave(&udc->lock, flags);
        set_bit(bep->ep_num, &udc->wedgemap);
        bcm63xx_set_stall(udc, bep, true);
        spin_unlock_irqrestore(&udc->lock, flags);

        return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
        .enable         = bcm63xx_ep_enable,
        .disable        = bcm63xx_ep_disable,

        .alloc_request  = bcm63xx_udc_alloc_request,
        .free_request   = bcm63xx_udc_free_request,

        .queue          = bcm63xx_udc_queue,
        .dequeue        = bcm63xx_udc_dequeue,

        .set_halt       = bcm63xx_udc_set_halt,
        .set_wedge      = bcm63xx_udc_set_wedge,
};
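
/*
 * Usage sketch (editorial note; these are the generic gadget-API
 * wrappers, not functions defined in this driver).  A function driver
 * reaches the ops above indirectly, e.g.:
 *
 *      struct usb_request *req;
 *
 *      usb_ep_enable(ep);                       calls bcm63xx_ep_enable()
 *      req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *      req->buf = buf;
 *      req->length = len;
 *      req->complete = my_complete;
 *      usb_ep_queue(ep, req, GFP_ATOMIC);       calls bcm63xx_udc_queue()
 */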
1285
1286/***********************************************************************
1287 * EP0 handling
1288 ***********************************************************************/
1289
1290/**
1291 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1292 * @udc: Reference to the device controller.
1293 * @ctrl: 8-byte SETUP request.
1294 */
1295static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1296 struct usb_ctrlrequest *ctrl)
1297{
1298 int rc;
1299
1300 spin_unlock_irq(&udc->lock);
1301 rc = udc->driver->setup(&udc->gadget, ctrl);
1302 spin_lock_irq(&udc->lock);
1303 return rc;
1304}
1305
1306/**
1307 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1308 * @udc: Reference to the device controller.
1309 *
1310 * Many standard requests are handled automatically in the hardware, but
1311 * we still need to pass them to the gadget driver so that it can
1312 * reconfigure the interfaces/endpoints if necessary.
1313 *
1314 * Unfortunately we are not able to send a STALL response if the host
1315 * requests an invalid configuration. If this happens, we'll have to be
1316 * content with printing a warning.
1317 */
1318static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1319{
1320 struct usb_ctrlrequest ctrl;
1321 int rc;
1322
1323 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1324 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1325 ctrl.wValue = cpu_to_le16(udc->cfg);
1326 ctrl.wIndex = 0;
1327 ctrl.wLength = 0;
1328
1329 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1330 if (rc < 0) {
1331 dev_warn_ratelimited(udc->dev,
1332 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1333 udc->cfg);
1334 }
1335 return rc;
1336}
1337
1338/**
1339 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1340 * @udc: Reference to the device controller.
1341 */
1342static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1343{
1344 struct usb_ctrlrequest ctrl;
1345 int rc;
1346
1347 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1348 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1349 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1350 ctrl.wIndex = cpu_to_le16(udc->iface);
1351 ctrl.wLength = 0;
1352
1353 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1354 if (rc < 0) {
1355 dev_warn_ratelimited(udc->dev,
1356 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1357 udc->iface, udc->alt_iface);
1358 }
1359 return rc;
1360}
1361
1362/**
1363 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1364 * @udc: Reference to the device controller.
1365 * @ch_idx: IUDMA channel number.
1366 * @req: USB gadget layer representation of the request.
1367 */
1368static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1369 struct usb_request *req)
1370{
1371 struct bcm63xx_req *breq = our_req(req);
1372 struct iudma_ch *iudma = &udc->iudma[ch_idx];
1373
1374 BUG_ON(udc->ep0_request);
1375 udc->ep0_request = req;
1376
1377 req->actual = 0;
1378 breq->offset = 0;
1379 usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1380 iudma_write(udc, iudma, breq);
1381}
1382
1383/**
1384 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1385 * @udc: Reference to the device controller.
1386 * @req: USB gadget layer representation of the request.
1387 * @status: Status to return to the gadget driver.
1388 */
1389static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1390 struct usb_request *req, int status)
1391{
1392 req->status = status;
1393 if (status)
1394 req->actual = 0;
1395 if (req->complete) {
1396 spin_unlock_irq(&udc->lock);
1397 req->complete(&udc->bep[0].ep, req);
1398 spin_lock_irq(&udc->lock);
1399 }
1400}
1401
1402/**
1403 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1404 * reset/shutdown.
1405 * @udc: Reference to the device controller.
1406 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1407 */
1408static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1409{
1410 struct usb_request *req = udc->ep0_reply;
1411
1412 udc->ep0_reply = NULL;
1413 usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1414 if (udc->ep0_request == req) {
1415 udc->ep0_req_completed = 0;
1416 udc->ep0_request = NULL;
1417 }
1418 bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1419}
1420
1421/**
1422 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1423 * transfer len.
1424 * @udc: Reference to the device controller.
1425 */
1426static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1427{
1428 struct usb_request *req = udc->ep0_request;
1429
1430 udc->ep0_req_completed = 0;
1431 udc->ep0_request = NULL;
1432
1433 return req->actual;
1434}
1435
1436/**
1437 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1438 * @udc: Reference to the device controller.
1439 * @ch_idx: IUDMA channel number.
1440 * @length: Number of bytes to TX/RX.
1441 *
1442 * Used for simple transfers performed by the ep0 worker. This will always
1443 * use ep0_ctrl_req / ep0_ctrl_buf.
1444 */
1445static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1446 int length)
1447{
1448 struct usb_request *req = &udc->ep0_ctrl_req.req;
1449
1450 req->buf = udc->ep0_ctrl_buf;
1451 req->length = length;
1452 req->complete = NULL;
1453
1454 bcm63xx_ep0_map_write(udc, ch_idx, req);
1455}
1456
1457/**
1458 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1459 * @udc: Reference to the device controller.
1460 *
1461 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
1462 * for the next packet. Anything else means the transaction requires multiple
1463 * stages of handling.
1464 */
1465static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1466{
1467 int rc;
1468 struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1469
1470 rc = bcm63xx_ep0_read_complete(udc);
1471
1472 if (rc < 0) {
1473 dev_err(udc->dev, "missing SETUP packet\n");
1474 return EP0_IDLE;
1475 }
1476
1477 /*
1478 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
1479 * ALWAYS deliver these 100% of the time, so if we happen to see one,
1480 * just throw it away.
1481 */
1482 if (rc == 0)
1483 return EP0_REQUEUE;
1484
1485 /* Drop malformed SETUP packets */
1486 if (rc != sizeof(*ctrl)) {
1487 dev_warn_ratelimited(udc->dev,
1488 "malformed SETUP packet (%d bytes)\n", rc);
1489 return EP0_REQUEUE;
1490 }
1491
1492 /* Process new SETUP packet arriving on ep0 */
1493 rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1494 if (rc < 0) {
1495 bcm63xx_set_stall(udc, &udc->bep[0], true);
1496 return EP0_REQUEUE;
1497 }
1498
1499 if (!ctrl->wLength)
1500 return EP0_REQUEUE;
1501 else if (ctrl->bRequestType & USB_DIR_IN)
1502 return EP0_IN_DATA_PHASE_SETUP;
1503 else
1504 return EP0_OUT_DATA_PHASE_SETUP;
1505}
1506
1507/**
1508 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1509 * @udc: Reference to the device controller.
1510 *
1511 * In state EP0_IDLE, the RX descriptor is either pending, or has been
1512 * filled with a SETUP packet from the host. This function handles new
1513 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1514 * and reset/shutdown events.
1515 *
1516 * Returns 0 if work was done; -EAGAIN if nothing to do.
1517 */
1518static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1519{
1520 if (udc->ep0_req_reset) {
1521 udc->ep0_req_reset = 0;
1522 } else if (udc->ep0_req_set_cfg) {
1523 udc->ep0_req_set_cfg = 0;
1524 if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1525 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1526 } else if (udc->ep0_req_set_iface) {
1527 udc->ep0_req_set_iface = 0;
1528 if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1529 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1530 } else if (udc->ep0_req_completed) {
1531 udc->ep0state = bcm63xx_ep0_do_setup(udc);
1532 return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1533 } else if (udc->ep0_req_shutdown) {
1534 udc->ep0_req_shutdown = 0;
1535 udc->ep0_req_completed = 0;
1536 udc->ep0_request = NULL;
1537 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1538 usb_gadget_unmap_request(&udc->gadget,
1539 &udc->ep0_ctrl_req.req, 0);
1540
1541 /* bcm63xx_udc_pullup() is waiting for this */
1542 mb();
1543 udc->ep0state = EP0_SHUTDOWN;
1544 } else if (udc->ep0_reply) {
1545 /*
1546 * This could happen if a USB RESET shows up during an ep0
1547 * transaction (especially if a laggy driver like gadgetfs
1548 * is in use).
1549 */
1550 dev_warn(udc->dev, "nuking unexpected reply\n");
1551 bcm63xx_ep0_nuke_reply(udc, 0);
1552 } else {
1553 return -EAGAIN;
1554 }
1555
1556 return 0;
1557}
1558
1559/**
1560 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1561 * @udc: Reference to the device controller.
1562 *
1563 * Returns 0 if work was done; -EAGAIN if nothing to do.
1564 */
1565static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1566{
1567 enum bcm63xx_ep0_state ep0state = udc->ep0state;
1568 bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1569
1570 switch (udc->ep0state) {
1571 case EP0_REQUEUE:
1572 /* set up descriptor to receive SETUP packet */
1573 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1574 BCM63XX_MAX_CTRL_PKT);
1575 ep0state = EP0_IDLE;
1576 break;
1577 case EP0_IDLE:
1578 return bcm63xx_ep0_do_idle(udc);
1579 case EP0_IN_DATA_PHASE_SETUP:
1580 /*
1581 * Normal case: TX request is in ep0_reply (queued by the
1582 * callback), or will be queued shortly. When it's here,
1583 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1584 *
1585 * Shutdown case: Stop waiting for the reply. Just
1586 * REQUEUE->IDLE. The gadget driver is NOT expected to
1587 * queue anything else now.
1588 */
1589 if (udc->ep0_reply) {
1590 bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1591 udc->ep0_reply);
1592 ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1593 } else if (shutdown) {
1594 ep0state = EP0_REQUEUE;
1595 }
1596 break;
1597 case EP0_IN_DATA_PHASE_COMPLETE: {
1598 /*
1599 * Normal case: TX packet (ep0_reply) is in flight; wait for
1600 * it to finish, then go back to REQUEUE->IDLE.
1601 *
1602 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1603 * completion to the gadget driver, then REQUEUE->IDLE.
1604 */
1605 if (udc->ep0_req_completed) {
1606 udc->ep0_reply = NULL;
1607 bcm63xx_ep0_read_complete(udc);
1608 /*
1609 * the "ack" sometimes gets eaten (see
1610 * bcm63xx_ep0_do_idle)
1611 */
1612 ep0state = EP0_REQUEUE;
1613 } else if (shutdown) {
1614 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1615 bcm63xx_ep0_nuke_reply(udc, 1);
1616 ep0state = EP0_REQUEUE;
1617 }
1618 break;
1619 }
1620 case EP0_OUT_DATA_PHASE_SETUP:
1621 /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1622 if (udc->ep0_reply) {
1623 bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1624 udc->ep0_reply);
1625 ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1626 } else if (shutdown) {
1627 ep0state = EP0_REQUEUE;
1628 }
1629 break;
1630 case EP0_OUT_DATA_PHASE_COMPLETE: {
1631 /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1632 if (udc->ep0_req_completed) {
1633 udc->ep0_reply = NULL;
1634 bcm63xx_ep0_read_complete(udc);
1635
1636 /* send 0-byte ack to host */
1637 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1638 ep0state = EP0_OUT_STATUS_PHASE;
1639 } else if (shutdown) {
1640 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1641 bcm63xx_ep0_nuke_reply(udc, 0);
1642 ep0state = EP0_REQUEUE;
1643 }
1644 break;
1645 }
1646 case EP0_OUT_STATUS_PHASE:
1647 /*
1648 * Normal case: 0-byte OUT ack packet is in flight; wait
1649 * for it to finish, then go back to REQUEUE->IDLE.
1650 *
1651 * Shutdown case: just cancel the transmission. Don't bother
1652 * calling the completion, because it originated from this
1653 * function anyway. Then go back to REQUEUE->IDLE.
1654 */
1655 if (udc->ep0_req_completed) {
1656 bcm63xx_ep0_read_complete(udc);
1657 ep0state = EP0_REQUEUE;
1658 } else if (shutdown) {
1659 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1660 udc->ep0_request = NULL;
1661 ep0state = EP0_REQUEUE;
1662 }
1663 break;
1664 case EP0_IN_FAKE_STATUS_PHASE: {
1665 /*
1666 * Normal case: we spoofed a SETUP packet and are now
1667 * waiting for the gadget driver to send a 0-byte reply.
1668 * This doesn't actually get sent to the HW because the
1669 * HW has already sent its own reply. Once we get the
1670 * response, return to IDLE.
1671 *
1672 * Shutdown case: return to IDLE immediately.
1673 *
1674 * Note that the ep0 RX descriptor has remained queued
1675 * (and possibly unfilled) during this entire transaction.
1676 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1677 * or SET_INTERFACE transactions.
1678 */
1679 struct usb_request *r = udc->ep0_reply;
1680
1681 if (!r) {
1682 if (shutdown)
1683 ep0state = EP0_IDLE;
1684 break;
1685 }
1686
1687 bcm63xx_ep0_complete(udc, r, 0);
1688 udc->ep0_reply = NULL;
1689 ep0state = EP0_IDLE;
1690 break;
1691 }
1692 case EP0_SHUTDOWN:
1693 break;
1694 }
1695
1696 if (udc->ep0state == ep0state)
1697 return -EAGAIN;
1698
1699 udc->ep0state = ep0state;
1700 return 0;
1701}
1702
1703/**
1704 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1705 * @w: Workqueue struct.
1706 *
1707 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
1708 * is used to synchronize ep0 events and ensure that both HW and SW events
1709 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
1710 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1711 * by the USBD hardware.
1712 *
1713 * The worker function will continue iterating around the state machine
1714 * until there is nothing left to do. Usually "nothing left to do" means
1715 * that we're waiting for a new event from the hardware.
1716 */
1717static void bcm63xx_ep0_process(struct work_struct *w)
1718{
1719 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1720 spin_lock_irq(&udc->lock);
1721 while (bcm63xx_ep0_one_round(udc) == 0)
1722 ;
1723 spin_unlock_irq(&udc->lock);
1724}
1725
1726/***********************************************************************
1727 * Standard UDC gadget operations
1728 ***********************************************************************/
1729
1730/**
1731 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1732 * @gadget: USB slave device.
1733 */
1734static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1735{
1736 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1737
1738 return (usbd_readl(udc, USBD_STATUS_REG) &
1739 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1740}
1741
1742/**
1743 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1744 * @gadget: USB slave device.
1745 * @is_on: 0 to disable pullup, 1 to enable.
1746 *
1747 * See notes in bcm63xx_select_pullup().
1748 */
1749static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1750{
1751 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1752 unsigned long flags;
1753 int i, rc = -EINVAL;
1754
1755 spin_lock_irqsave(&udc->lock, flags);
1756 if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1757 udc->gadget.speed = USB_SPEED_UNKNOWN;
1758 udc->ep0state = EP0_REQUEUE;
1759 bcm63xx_fifo_setup(udc);
1760 bcm63xx_fifo_reset(udc);
1761 bcm63xx_ep_setup(udc);
1762
1763 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1764 for (i = 0; i < BCM63XX_NUM_EP; i++)
1765 bcm63xx_set_stall(udc, &udc->bep[i], false);
1766
1767 bcm63xx_set_ctrl_irqs(udc, true);
1768 bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1769 rc = 0;
1770 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1771 bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1772
1773 udc->ep0_req_shutdown = 1;
1774 spin_unlock_irqrestore(&udc->lock, flags);
1775
1776 while (1) {
1777 schedule_work(&udc->ep0_wq);
1778 if (udc->ep0state == EP0_SHUTDOWN)
1779 break;
1780 msleep(50);
1781 }
1782 bcm63xx_set_ctrl_irqs(udc, false);
1783 cancel_work_sync(&udc->ep0_wq);
1784 return 0;
1785 }
1786
1787 spin_unlock_irqrestore(&udc->lock, flags);
1788 return rc;
1789}
1790
1791/**
1792 * bcm63xx_udc_start - Start the controller.
1793 * @gadget: USB slave device.
1794 * @driver: Driver for USB slave devices.
1795 */
1796static int bcm63xx_udc_start(struct usb_gadget *gadget,
1797 struct usb_gadget_driver *driver)
1798{
1799 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1800 unsigned long flags;
1801
1802 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1803 !driver->setup)
1804 return -EINVAL;
1805 if (!udc)
1806 return -ENODEV;
1807 if (udc->driver)
1808 return -EBUSY;
1809
1810 spin_lock_irqsave(&udc->lock, flags);
1811
1812 set_clocks(udc, true);
1813 bcm63xx_fifo_setup(udc);
1814 bcm63xx_ep_init(udc);
1815 bcm63xx_ep_setup(udc);
1816 bcm63xx_fifo_reset(udc);
1817 bcm63xx_select_phy_mode(udc, true);
1818
1819 udc->driver = driver;
1820 driver->driver.bus = NULL;
Kevin Cernekee613065e2012-08-25 12:38:52 -07001821 udc->gadget.dev.of_node = udc->dev->of_node;
1822
1823 spin_unlock_irqrestore(&udc->lock, flags);
1824
1825 return 0;
1826}

/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	udc->driver = NULL;
	spin_unlock_irqrestore(&udc->lock, flags);

	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 *
	 * msleep() must not be called in atomic context, so the delay
	 * is done with the lock dropped.
	 */
	msleep(100);

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame = bcm63xx_udc_get_frame,
	.pullup = bcm63xx_udc_pullup,
	.udc_start = bcm63xx_udc_start,
	.udc_stop = bcm63xx_udc_stop,
};

/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);

	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
			 USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}

/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;

	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
		break;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		/* this should never happen */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(udc->dev,
			"received SETUP packet with invalid link speed\n");
		return 0;
	}

	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
		return 1;
	} else {
		return 0;
	}
}

/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, the wedge status is cleared and each wedged
 * endpoint is returned to its normal (unstalled) state.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}

/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false;

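	/* only service events that are both pending and currently unmasked */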
	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

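	/*
	 * Notify the gadget driver with udc->lock dropped; the disconnect
	 * callback may call back into this driver (e.g. to dequeue
	 * requests) and take the lock itself.
	 */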
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);

	return IRQ_HANDLED;
}

/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread. For normal bulk/intr channels, we either queue up
 * the next buffer descriptor for the transaction (incomplete transaction)
 * or invoke the completion callback (completed transaction).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG(iudma->ch_idx));
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

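			/* a short read (rc < bd_bytes) ends the transfer early */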
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

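	/*
	 * Unmap and complete the request with the lock dropped: completion
	 * callbacks commonly queue the next request, which would retake
	 * udc->lock.
	 */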
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}

/***********************************************************************
 * Debug filesystem
 ***********************************************************************/

/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;

	if (!udc->driver)
		return -ENODEV;

	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	seq_puts(s, "regs:\n");
	seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, " events: %08x; stall: %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));

	return 0;
}

/*
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;

	if (!udc->driver)
		return -ENODEV;

	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];
		struct list_head *pos;

		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_puts(s, "control");
			break;
		case BCMEP_BULK:
			seq_puts(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_puts(s, "interrupt");
			break;
		}
		seq_puts(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));

		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
		seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
		seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);

		if (iudma->bep) {
			i = 0;
			list_for_each(pos, &iudma->bep->queue)
				i++;
			seq_printf(s, "; %d queued\n", i);
		} else {
			seq_puts(s, "\n");
		}

		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];

			seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   (unsigned int)(i * sizeof(*d)), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_puts(s, " <<RD");
			if (d == iudma->write_bd)
				seq_puts(s, " <<WR");
			seq_puts(s, "\n");
		}

		seq_puts(s, "\n");
	}

	return 0;
}

static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
}

static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
}

static const struct file_operations usbd_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_usbd_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};

static const struct file_operations iudma_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_iudma_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};

/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root, *usbd, *iudma;

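	/* with USB_GADGET_DEBUG_FS unset, the early return lets the
	 * compiler discard everything below */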
	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, NULL);
	if (IS_ERR(root) || !root)
		goto err_root;

	usbd = debugfs_create_file("usbd", 0400, root, udc,
				   &usbd_dbg_fops);
	if (!usbd)
		goto err_usbd;
	iudma = debugfs_create_file("iudma", 0400, root, udc,
				    &iudma_dbg_fops);
	if (!iudma)
		goto err_iudma;

	udc->debugfs_root = root;
	udc->debugfs_usbd = usbd;
	udc->debugfs_iudma = iudma;
	return;
err_iudma:
	debugfs_remove(usbd);
err_usbd:
	debugfs_remove(root);
err_root:
	dev_err(udc->dev, "debugfs is not available\n");
}

/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_remove(udc->debugfs_iudma);
	debugfs_remove(udc->debugfs_usbd);
	debugfs_remove(udc->debugfs_root);
	udc->debugfs_iudma = NULL;
	udc->debugfs_usbd = NULL;
	udc->debugfs_root = NULL;
}

/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	struct resource *res;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc) {
		dev_err(dev, "cannot allocate memory\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	udc->usbd_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	udc->iudma_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource #0\n");
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0) {
		dev_err(dev, "error requesting IRQ #%d\n", irq);
		goto out_uninit;
	}

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0) {
			dev_err(dev, "error requesting IRQ #%d\n", irq);
			goto out_uninit;
		}
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;
}

/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}

static struct platform_driver bcm63xx_udc_driver = {
	.probe = bcm63xx_udc_probe,
	.remove = bcm63xx_udc_remove,
	.driver = {
		.name = DRV_MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
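
/*
 * Typical usage, assuming a BCM63xx board whose BSP registers the
 * "bcm63xx_udc" platform device with valid platform data:
 *
 *   modprobe bcm63xx_udc
 *   modprobe g_ether        # or another gadget driver
 *
 * The gadget core then binds the gadget driver to this UDC.
 */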