/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";
static const char *const bcm63xx_ep_name[] = {
	bcm63xx_ep0name,
	"ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
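
/*
 * Illustrative usage (not driver code): both knobs above are ordinary
 * module parameters, so a typical invocation looks like
 *
 *	modprobe bcm63xx_udc use_fullspeed=1 irq_coalesce=1
 *
 * or "bcm63xx_udc.irq_coalesce=1" on the kernel command line when the
 * driver is built in.
 */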

#define BCM63XX_NUM_EP		5
#define BCM63XX_NUM_IUDMA	6
#define BCM63XX_NUM_FIFO_PAIRS	3

#define IUDMA_RESET_TIMEOUT_US	10000

#define IUDMA_EP0_RXCHAN	0
#define IUDMA_EP0_TXCHAN	1

#define IUDMA_MAX_FRAGMENT	2048
#define BCM63XX_MAX_CTRL_PKT	64

#define BCMEP_CTRL		0x00
#define BCMEP_ISOC		0x01
#define BCMEP_BULK		0x02
#define BCMEP_INTR		0x03

#define BCMEP_OUT		0x00
#define BCMEP_IN		0x01

#define BCM63XX_SPD_FULL	1
#define BCM63XX_SPD_HIGH	0

#define IUDMA_DMAC_OFFSET	0x200
#define IUDMA_DMAS_OFFSET	0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots      max_pkt_fs
	   idx      |  n_bds     |  dir          |  max_pkt_hs  |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,    64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,    64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,    64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,    64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,    64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,    64 },
};
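
/*
 * For reference: each even-numbered (RX/OUT) channel above is paired with
 * the following odd-numbered (TX/IN) channel, so the endpoints named in
 * bcm63xx_ep_name[] map onto IUDMA channels as follows:
 *
 *	ep0         -> channels 0 (RX) + 1 (TX)
 *	ep2out-bulk -> channel 2	ep1in-bulk -> channel 3
 *	ep4out-int  -> channel 4	ep3in-int  -> channel 5
 */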

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head queue;		/* ep's requests */
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
	struct dentry *debugfs_usbd;
	struct dentry *debugfs_iudma;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
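
/*
 * Worked example: with the default iudma_defaults[] sizing, each pass of
 * the loop above claims a contiguous block of slots per direction,
 * 32 + 128 + 32 = 192 slots in total: pair 0 (ep0) gets slots 0..31,
 * pair 1 (bulk) gets 32..159, pair 2 (interrupt) gets 160..191.  The
 * "n_fifo_slots - 1" accounts for END being an inclusive slot index.
 */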

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		udc->bep[idx].ep.maxpacket = max_pkt;

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
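
/*
 * Worked example (symbolic): at high speed, the bulk OUT endpoint (ep2,
 * iudma_defaults[2]) gets max_pkt = 512, so USBD_CSR_EP_REG(2) is loaded
 * with
 *
 *	(2 << LOG) | (BCMEP_OUT << DIR) | (BCMEP_BULK << TYPE) |
 *	(cfg << CFG) | (iface << IFACE) | (alt_iface << ALTIFACE) |
 *	(512 << MAXPKT)
 *
 * using whatever cfg/iface/alt_iface the host last selected, which is
 * why this function must be rerun on every speed/cfg/intf/altintf change.
 */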

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
				 iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG(iudma->ch_idx));
}
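
/*
 * Worked example: a 3000-byte TX (IN) request with req->zero clear is
 * split at IUDMA_MAX_FRAGMENT, so the loop above queues two BDs in one
 * call: 2048 bytes flagged SOP, then 952 bytes flagged EOP.  An RX (OUT)
 * request always gets exactly one BD per call - max_pkt bytes (e.g. 512
 * at high speed) with irq_coalesce=0, or up to 2048 bytes with
 * irq_coalesce=1 - so the completion path calls iudma_write() again
 * until the usb_request is satisfied.
 */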

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.  Returns -EINVAL if no BDs are
 * outstanding on the channel.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
	       ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG(ch_idx));
		}
	}
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG(ch_idx));
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA) - 1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_name[i];
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		bep->ep.maxpacket = BCM63XX_MAX_CTRL_PKT;
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct list_head *pos, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_safe(pos, n, &bep->queue) {
			struct bcm63xx_req *breq =
				list_entry(pos, struct bcm63xx_req, queue);

			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			breq->req.complete(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);

	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
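
/*
 * Illustrative sketch (not used by this driver): how a gadget driver's
 * request reaches the ops table above.  The endpoint and completion
 * callback are assumed to come from the gadget driver itself; only the
 * usb_ep_*() wrappers are real gadget-layer API.
 */
static int __maybe_unused bcm63xx_example_queue(struct usb_ep *ep, void *buf,
	unsigned int len,
	void (*done)(struct usb_ep *ep, struct usb_request *req))
{
	struct usb_request *req;

	/* dispatches to bcm63xx_udc_alloc_request() via bcm63xx_udc_ep_ops */
	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = done;	/* called when the IUDMA transfer finishes */

	/* dispatches to bcm63xx_udc_queue(); starts IUDMA if queue was idle */
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}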

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration.  If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker.  This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
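
/*
 * Example: a GET_DESCRIPTOR SETUP packet has bRequestType = 0x80
 * (USB_DIR_IN) and nonzero wLength, so the function above returns
 * EP0_IN_DATA_PHASE_SETUP and the worker waits for the gadget driver to
 * queue its reply on ep0.  A request with wLength == 0 returns
 * EP0_REQUEUE straight away, since there is no data phase to manage.
 */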

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host.  This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do.  Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);

	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}
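
/*
 * Example flow for a control-IN transfer, one line per successful
 * bcm63xx_ep0_one_round() pass (the worker exits when a round returns
 * -EAGAIN):
 *
 *	EP0_REQUEUE                 queue the SETUP RX descriptor
 *	EP0_IDLE                    SETUP arrives; driver ->setup() runs
 *	EP0_IN_DATA_PHASE_SETUP     gadget reply found in ep0_reply; TX queued
 *	EP0_IN_DATA_PHASE_COMPLETE  TX finished; back to EP0_REQUEUE
 */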

/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/

/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB slave device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);

	return (usbd_readl(udc, USBD_STATUS_REG) &
		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}

/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
	struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
1828
1829/**
1830 * bcm63xx_udc_stop - Shut down the controller.
1831 * @gadget: USB slave device.
1832 * @driver: Driver for USB slave devices.
1833 */
1834static int bcm63xx_udc_stop(struct usb_gadget *gadget,
1835 struct usb_gadget_driver *driver)
1836{
1837 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1838 unsigned long flags;
1839
1840 spin_lock_irqsave(&udc->lock, flags);
1841
1842 udc->driver = NULL;
1843
1844 /*
1845 * If we switch the PHY too abruptly after dropping D+, the host
1846 * will often complain:
1847 *
1848 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1849 */
1850 msleep(100);
1851
1852 bcm63xx_select_phy_mode(udc, false);
1853 set_clocks(udc, false);
1854
1855 spin_unlock_irqrestore(&udc->lock, flags);
1856
1857 return 0;
1858}
1859
1860static const struct usb_gadget_ops bcm63xx_udc_ops = {
1861 .get_frame = bcm63xx_udc_get_frame,
1862 .pullup = bcm63xx_udc_pullup,
1863 .udc_start = bcm63xx_udc_start,
1864 .udc_stop = bcm63xx_udc_stop,
1865};
1866
1867/***********************************************************************
1868 * IRQ handling
1869 ***********************************************************************/
1870
1871/**
1872 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1873 * @udc: Reference to the device controller.
1874 *
1875 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1876 * The driver never sees the raw control packets coming in on the ep0
1877 * IUDMA channel, but at least we get an interrupt event to tell us that
1878 * new values are waiting in the USBD_STATUS register.
1879 */
1880static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1881{
1882 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1883
1884 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1885 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1886 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1887 USBD_STATUS_ALTINTF_SHIFT;
1888 bcm63xx_ep_setup(udc);
1889}
1890
1891/**
1892 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1893 * @udc: Reference to the device controller.
1894 *
1895 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
1896 * speed has changed, so that the caller can update the endpoint settings.
1897 */
1898static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1899{
1900 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1901 enum usb_device_speed oldspeed = udc->gadget.speed;
1902
1903 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1904 case BCM63XX_SPD_HIGH:
1905 udc->gadget.speed = USB_SPEED_HIGH;
1906 break;
1907 case BCM63XX_SPD_FULL:
1908 udc->gadget.speed = USB_SPEED_FULL;
1909 break;
1910 default:
1911 /* this should never happen */
1912 udc->gadget.speed = USB_SPEED_UNKNOWN;
1913 dev_err(udc->dev,
1914 "received SETUP packet with invalid link speed\n");
1915 return 0;
1916 }
1917
1918 if (udc->gadget.speed != oldspeed) {
1919 dev_info(udc->dev, "link up, %s-speed mode\n",
1920 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1921 return 1;
1922 } else {
1923 return 0;
1924 }
1925}
1926
1927/**
1928 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1929 * @udc: Reference to the device controller.
1930 * @new_status: true to "refresh" wedge status; false to clear it.
1931 *
1932 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1933 * because the controller hardware is designed to automatically clear
1934 * stalls in response to a CLEAR_FEATURE request from the host.
1935 *
1936 * On a RESET interrupt, we clear both the stall and the wedge status.
1937 */
1938static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1939{
1940 int i;
1941
1942 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1943 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1944 if (!new_status)
1945 clear_bit(i, &udc->wedgemap);
1946 }
1947}
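/*
 * Sketch of how an endpoint becomes wedged in the first place (assuming
 * the standard gadget API; this driver's set_wedge handler is defined
 * earlier in the file):
 *
 *	usb_ep_set_wedge(ep);	sets the ep's bit in udc->wedgemap
 *
 * bcm63xx_update_wedge() then reasserts the stall after each SETUP and
 * forgets the wedge only on a USB reset.
 */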
1948
1949/**
1950 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1951 * @irq: IRQ number (unused).
1952 * @dev_id: Reference to the device controller.
1953 *
1954 * This is where we handle link (VBUS) down, USB reset, speed changes,
1955 * SET_CONFIGURATION, and SET_INTERFACE events.
1956 */
1957static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1958{
1959 struct bcm63xx_udc *udc = dev_id;
1960 u32 stat;
1961 bool disconnected = false;
1962
1963 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1964 usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1965
1966 usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1967
1968 spin_lock(&udc->lock);
1969 if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1970 /* VBUS toggled */
1971
1972 if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1973 USBD_EVENTS_USB_LINK_MASK) &&
1974 udc->gadget.speed != USB_SPEED_UNKNOWN)
1975 dev_info(udc->dev, "link down\n");
1976
1977 udc->gadget.speed = USB_SPEED_UNKNOWN;
1978 disconnected = true;
1979 }
1980 if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1981 bcm63xx_fifo_setup(udc);
1982 bcm63xx_fifo_reset(udc);
1983 bcm63xx_ep_setup(udc);
1984
1985 bcm63xx_update_wedge(udc, false);
1986
1987 udc->ep0_req_reset = 1;
1988 schedule_work(&udc->ep0_wq);
1989 disconnected = true;
1990 }
1991 if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
1992 if (bcm63xx_update_link_speed(udc)) {
1993 bcm63xx_fifo_setup(udc);
1994 bcm63xx_ep_setup(udc);
1995 }
1996 bcm63xx_update_wedge(udc, true);
1997 }
1998 if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
1999 bcm63xx_update_cfg_iface(udc);
2000 udc->ep0_req_set_cfg = 1;
2001 schedule_work(&udc->ep0_wq);
2002 }
2003 if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2004 bcm63xx_update_cfg_iface(udc);
2005 udc->ep0_req_set_iface = 1;
2006 schedule_work(&udc->ep0_wq);
2007 }
2008 spin_unlock(&udc->lock);
2009
2010 if (disconnected && udc->driver)
2011 udc->driver->disconnect(&udc->gadget);
2012
2013 return IRQ_HANDLED;
2014}
2015
2016/**
2017 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2018 * @irq: IRQ number (unused).
2019 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2020 *
2021 * For the two ep0 channels, we have special handling that triggers the
2022 * ep0 worker thread. For normal bulk/intr channels, we either queue up
2023 * the next buffer descriptor for the transaction (incomplete transaction)
2024 * or invoke the completion callback (complete transaction).
2025 */
2026static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2027{
2028 struct iudma_ch *iudma = dev_id;
2029 struct bcm63xx_udc *udc = iudma->udc;
2030 struct bcm63xx_ep *bep;
2031 struct usb_request *req = NULL;
2032 struct bcm63xx_req *breq = NULL;
2033 int rc;
2034 bool is_done = false;
2035
2036 spin_lock(&udc->lock);
2037
2038 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2039 ENETDMAC_IR_REG(iudma->ch_idx));
2040 bep = iudma->bep;
2041 rc = iudma_read(udc, iudma);
2042
2043 /* special handling for EP0 RX (0) and TX (1) */
2044 if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2045 iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2046 req = udc->ep0_request;
2047 breq = our_req(req);
2048
2049 /* a single request could require multiple submissions */
2050 if (rc >= 0) {
2051 req->actual += rc;
2052
2053 if (req->actual >= req->length || breq->bd_bytes > rc) {
2054 udc->ep0_req_completed = 1;
2055 is_done = true;
2056 schedule_work(&udc->ep0_wq);
2057
2058 /* "actual" on a ZLP is 1 byte */
2059 req->actual = min(req->actual, req->length);
2060 } else {
2061 /* queue up the next BD (same request) */
2062 iudma_write(udc, iudma, breq);
2063 }
2064 }
2065 } else if (!list_empty(&bep->queue)) {
2066 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2067 req = &breq->req;
2068
2069 if (rc >= 0) {
2070 req->actual += rc;
2071
2072 if (req->actual >= req->length || breq->bd_bytes > rc) {
2073 is_done = true;
2074 list_del(&breq->queue);
2075
2076 req->actual = min(req->actual, req->length);
2077
2078 if (!list_empty(&bep->queue)) {
2079 struct bcm63xx_req *next;
2080
2081 next = list_first_entry(&bep->queue,
2082 struct bcm63xx_req, queue);
2083 iudma_write(udc, iudma, next);
2084 }
2085 } else {
2086 iudma_write(udc, iudma, breq);
2087 }
2088 }
2089 }
2090 spin_unlock(&udc->lock);
2091
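 /*
 * Invoke the completion callback without udc->lock held: complete()
 * handlers commonly queue the next request, which re-enters this
 * driver and retakes the lock.
 */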
2092 if (is_done) {
2093 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2094 if (req->complete)
2095 req->complete(&bep->ep, req);
2096 }
2097
2098 return IRQ_HANDLED;
2099}
2100
2101/***********************************************************************
2102 * Debug filesystem
2103 ***********************************************************************/
2104
2105/**
2106 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2107 * @s: seq_file to which the information will be written.
2108 * @p: Unused.
2109 *
2110 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2111 */
2112static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2113{
2114 struct bcm63xx_udc *udc = s->private;
2115
2116 if (!udc->driver)
2117 return -ENODEV;
2118
2119 seq_printf(s, "ep0 state: %s\n",
2120 bcm63xx_ep0_state_names[udc->ep0state]);
2121 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2122 udc->ep0_req_reset ? "reset " : "",
2123 udc->ep0_req_set_cfg ? "set_cfg " : "",
2124 udc->ep0_req_set_iface ? "set_iface " : "",
2125 udc->ep0_req_shutdown ? "shutdown " : "",
2126 udc->ep0_request ? "pending " : "",
2127 udc->ep0_req_completed ? "completed " : "",
2128 udc->ep0_reply ? "reply " : "");
2129 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2130 udc->cfg, udc->iface, udc->alt_iface);
2131 seq_printf(s, "regs:\n");
2132 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2133 usbd_readl(udc, USBD_CONTROL_REG),
2134 usbd_readl(udc, USBD_STRAPS_REG),
2135 usbd_readl(udc, USBD_STATUS_REG));
2136 seq_printf(s, " events: %08x; stall: %08x\n",
2137 usbd_readl(udc, USBD_EVENTS_REG),
2138 usbd_readl(udc, USBD_STALL_REG));
2139
2140 return 0;
2141}
2142
2143/**
2144 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2145 * @s: seq_file to which the information will be written.
2146 * @p: Unused.
2147 *
2148 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2149 */
2150static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2151{
2152 struct bcm63xx_udc *udc = s->private;
2153 int ch_idx, i;
2154 u32 sram2, sram3;
2155
2156 if (!udc->driver)
2157 return -ENODEV;
2158
2159 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2160 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2161 struct list_head *pos;
2162
2163 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2164 switch (iudma_defaults[ch_idx].ep_type) {
2165 case BCMEP_CTRL:
2166 seq_printf(s, "control");
2167 break;
2168 case BCMEP_BULK:
2169 seq_printf(s, "bulk");
2170 break;
2171 case BCMEP_INTR:
2172 seq_printf(s, "interrupt");
2173 break;
2174 }
2175 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2176 seq_printf(s, " [ep%d]:\n",
2177 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2178 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2179 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
2180 usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
2181 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
2182 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
2183
2184 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
2185 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
2186 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2187 usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
2188 sram2 >> 16, sram2 & 0xffff,
2189 sram3 >> 16, sram3 & 0xffff,
2190 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
2191 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2192 iudma->n_bds);
2193
2194 if (iudma->bep) {
2195 i = 0;
2196 list_for_each(pos, &iudma->bep->queue)
2197 i++;
2198 seq_printf(s, "; %d queued\n", i);
2199 } else {
2200 seq_printf(s, "\n");
2201 }
2202
2203 for (i = 0; i < iudma->n_bds; i++) {
2204 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2205
2206 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2207 i * sizeof(*d), i,
2208 d->len_stat >> 16, d->len_stat & 0xffff,
2209 d->address);
2210 if (d == iudma->read_bd)
2211 seq_printf(s, " <<RD");
2212 if (d == iudma->write_bd)
2213 seq_printf(s, " <<WR");
2214 seq_printf(s, "\n");
2215 }
2216
2217 seq_printf(s, "\n");
2218 }
2219
2220 return 0;
2221}
2222
2223static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2224{
2225 return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2226}
2227
2228static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2229{
2230 return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2231}
2232
2233static const struct file_operations usbd_dbg_fops = {
2234 .owner = THIS_MODULE,
2235 .open = bcm63xx_usbd_dbg_open,
2236 .llseek = seq_lseek,
2237 .read = seq_read,
2238 .release = single_release,
2239};
2240
2241static const struct file_operations iudma_dbg_fops = {
2242 .owner = THIS_MODULE,
2243 .open = bcm63xx_iudma_dbg_open,
2244 .llseek = seq_lseek,
2245 .read = seq_read,
2246 .release = single_release,
2247};
2248
2249
2250/**
2251 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2252 * @udc: Reference to the device controller.
2253 */
2254static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2255{
2256 struct dentry *root, *usbd, *iudma;
2257
2258 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2259 return;
2260
2261 root = debugfs_create_dir(udc->gadget.name, NULL);
2262 if (IS_ERR(root) || !root)
2263 goto err_root;
2264
2265 usbd = debugfs_create_file("usbd", 0400, root, udc,
2266 &usbd_dbg_fops);
2267 if (!usbd)
2268 goto err_usbd;
2269 iudma = debugfs_create_file("iudma", 0400, root, udc,
2270 &iudma_dbg_fops);
2271 if (!iudma)
2272 goto err_iudma;
2273
2274 udc->debugfs_root = root;
2275 udc->debugfs_usbd = usbd;
2276 udc->debugfs_iudma = iudma;
2277 return;
2278err_iudma:
2279 debugfs_remove(usbd);
2280err_usbd:
2281 debugfs_remove(root);
2282err_root:
2283 dev_err(udc->dev, "debugfs is not available\n");
2284}
2285
2286/**
2287 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2288 * @udc: Reference to the device controller.
2289 *
2290 * debugfs_remove() is safe to call with a NULL argument.
2291 */
2292static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2293{
2294 debugfs_remove(udc->debugfs_iudma);
2295 debugfs_remove(udc->debugfs_usbd);
2296 debugfs_remove(udc->debugfs_root);
2297 udc->debugfs_iudma = NULL;
2298 udc->debugfs_usbd = NULL;
2299 udc->debugfs_root = NULL;
2300}
2301
2302/***********************************************************************
2303 * Driver init/exit
2304 ***********************************************************************/
2305
2306/**
2307 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2308 * @pdev: Platform device struct from the bcm63xx BSP code.
2309 *
2310 * Note that platform data is required, because pd.port_no varies from chip
2311 * to chip and is used to switch the correct USB port to device mode.
2312 */
2313static int bcm63xx_udc_probe(struct platform_device *pdev)
2314{
2315 struct device *dev = &pdev->dev;
2316 struct bcm63xx_usbd_platform_data *pd = dev->platform_data;
2317 struct bcm63xx_udc *udc;
2318 struct resource *res;
2319 int rc = -ENOMEM, i, irq;
2320
2321 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2322 if (!udc) {
2323 dev_err(dev, "cannot allocate memory\n");
2324 return -ENOMEM;
2325 }
2326
2327 platform_set_drvdata(pdev, udc);
2328 udc->dev = dev;
2329 udc->pd = pd;
2330
2331 if (!pd) {
2332 dev_err(dev, "missing platform data\n");
2333 return -EINVAL;
2334 }
2335
2336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2337 if (!res) {
2338 dev_err(dev, "error finding USBD resource\n");
2339 return -ENXIO;
2340 }
2341
2342 udc->usbd_regs = devm_ioremap_resource(dev, res);
2343 if (IS_ERR(udc->usbd_regs))
2344 return PTR_ERR(udc->usbd_regs);
2345
2346 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2347 if (!res) {
2348 dev_err(dev, "error finding IUDMA resource\n");
2349 return -ENXIO;
2350 }
2351
2352 udc->iudma_regs = devm_ioremap_resource(dev, res);
2353 if (IS_ERR(udc->iudma_regs))
2354 return PTR_ERR(udc->iudma_regs);
2355
2356 spin_lock_init(&udc->lock);
2357 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2358
2359 udc->gadget.ops = &bcm63xx_udc_ops;
2360 udc->gadget.name = dev_name(dev);
2361
2362 if (!pd->use_fullspeed && !use_fullspeed)
2363 udc->gadget.max_speed = USB_SPEED_HIGH;
2364 else
2365 udc->gadget.max_speed = USB_SPEED_FULL;
2366
2367 /* request clocks, allocate buffers, and clear any pending IRQs */
2368 rc = bcm63xx_init_udc_hw(udc);
2369 if (rc)
2370 return rc;
2371
2372 rc = -ENXIO;
2373
2374 /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2375 irq = platform_get_irq(pdev, 0);
2376 if (irq < 0) {
2377 dev_err(dev, "missing IRQ resource #0\n");
2378 goto out_uninit;
2379 }
2380 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2381 dev_name(dev), udc) < 0) {
2382 dev_err(dev, "error requesting IRQ #%d\n", irq);
2383 goto out_uninit;
2384 }
2385
2386 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2387 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2388 irq = platform_get_irq(pdev, i + 1);
2389 if (irq < 0) {
2390 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2391 goto out_uninit;
2392 }
2393 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2394 dev_name(dev), &udc->iudma[i]) < 0) {
2395 dev_err(dev, "error requesting IRQ #%d\n", irq);
2396 goto out_uninit;
2397 }
2398 }
2399
2400 bcm63xx_udc_init_debugfs(udc);
2401 rc = usb_add_gadget_udc(dev, &udc->gadget);
2402 if (!rc)
2403 return 0;
2404
2405 bcm63xx_udc_cleanup_debugfs(udc);
2406out_uninit:
2407 bcm63xx_uninit_udc_hw(udc);
2408 return rc;
2409}
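/*
 * Board-side sketch of the required platform data (hedged: the
 * registration helper name follows bcm63xx BSP conventions and may
 * differ by kernel version):
 *
 *	static struct bcm63xx_usbd_platform_data usbd_pd = {
 *		.port_no	= 0,	USB port to switch to device mode
 *		.use_fullspeed	= 0,	0 allows high speed operation
 *	};
 *
 *	bcm63xx_usbd_register(&usbd_pd);
 */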
2410
2411/**
2412 * bcm63xx_udc_remove - Remove the device from the system.
2413 * @pdev: Platform device struct from the bcm63xx BSP code.
2414 */
2415static int bcm63xx_udc_remove(struct platform_device *pdev)
2416{
2417 struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2418
2419 bcm63xx_udc_cleanup_debugfs(udc);
2420 usb_del_gadget_udc(&udc->gadget);
2421 BUG_ON(udc->driver);
2422
2423 platform_set_drvdata(pdev, NULL);
2424 bcm63xx_uninit_udc_hw(udc);
2425
2426 return 0;
2427}
2428
2429static struct platform_driver bcm63xx_udc_driver = {
2430 .probe = bcm63xx_udc_probe,
2431 .remove = bcm63xx_udc_remove,
2432 .driver = {
2433 .name = DRV_MODULE_NAME,
2434 .owner = THIS_MODULE,
2435 },
2436};
2437module_platform_driver(bcm63xx_udc_driver);
2438
2439MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2440MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2441MODULE_LICENSE("GPL");
2442MODULE_ALIAS("platform:" DRV_MODULE_NAME);