/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";
static const char *const bcm63xx_ep_name[] = {
	bcm63xx_ep0name,
	"ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
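
/*
 * Both knobs are plain module parameters, so they can be given at load
 * time -- an illustrative invocation (not from the original source):
 *
 *   modprobe bcm63xx_udc irq_coalesce=1
 *
 * or bcm63xx_udc.irq_coalesce=1 on the kernel command line if the driver
 * is built in.
 */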

#define BCM63XX_NUM_EP		5
#define BCM63XX_NUM_IUDMA	6
#define BCM63XX_NUM_FIFO_PAIRS	3

#define IUDMA_RESET_TIMEOUT_US	10000

#define IUDMA_EP0_RXCHAN	0
#define IUDMA_EP0_TXCHAN	1

#define IUDMA_MAX_FRAGMENT	2048
#define BCM63XX_MAX_CTRL_PKT	64

#define BCMEP_CTRL		0x00
#define BCMEP_ISOC		0x01
#define BCMEP_BULK		0x02
#define BCMEP_INTR		0x03

#define BCMEP_OUT		0x00
#define BCMEP_IN		0x01

#define BCM63XX_SPD_FULL	1
#define BCM63XX_SPD_HIGH	0

#define IUDMA_DMAC_OFFSET	0x200
#define IUDMA_DMAS_OFFSET	0x400
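
/*
 * The per-channel DMAC (config/IRQ) and DMAS (state RAM) register banks
 * sit at these offsets from the IUDMA core base; the usb_dmac_*() and
 * usb_dmas_*() accessors below add the bank offset plus
 * ENETDMA_CHAN_WIDTH * chan.
 */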

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
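
/*
 * Rough sketch of the normal ep0 flow (bcm63xx_ep0_one_round() below is
 * the authoritative version):
 *
 *   REQUEUE -> IDLE -> {IN,OUT}_DATA_PHASE_SETUP ->
 *   {IN,OUT}_DATA_PHASE_COMPLETE -> (OUT only: OUT_STATUS_PHASE ->) REQUEUE
 *
 * Spoofed SET_CONFIGURATION/SET_INTERFACE requests detour from IDLE
 * through IN_FAKE_STATUS_PHASE without ever touching the IUDMA datapath.
 */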

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	       ep_num      ep_type           n_fifo_slots   max_pkt_fs
	   idx |  n_bds     |            dir |   max_pkt_hs |
	    |  |    |       |             |  |       |      |       */
	[0] = { -1,  4, BCMEP_CTRL, BCMEP_OUT,  32,    64,    64 },
	[1] = {  0,  4, BCMEP_CTRL, BCMEP_OUT,  32,    64,    64 },
	[2] = {  2, 16, BCMEP_BULK, BCMEP_OUT, 128,   512,    64 },
	[3] = {  1, 16, BCMEP_BULK, BCMEP_IN,  128,   512,    64 },
	[4] = {  4,  4, BCMEP_INTR, BCMEP_OUT,  32,    64,    64 },
	[5] = {  3,  4, BCMEP_INTR, BCMEP_IN,   32,    64,    64 },
};
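
/*
 * Pairing convention used throughout this file: even channel indices are
 * RX (OUT), odd indices are TX (IN), and channels 2n/2n+1 share FIFO pair
 * n -- hence iudma_init_channel() deriving is_tx from ch_idx & 0x01 and
 * bcm63xx_fifo_setup() walking this table two entries at a time.
 */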

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head queue;		/* ep's requests */
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB slave device.
 * @driver: Driver for USB slave devices.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 * @debugfs_usbd: debugfs file "usbd" for controller state.
 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;

	struct dentry *debugfs_root;
	struct dentry *debugfs_usbd;
	struct dentry *debugfs_iudma;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
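
/*
 * Worked example with the iudma_defaults table above: on each side (RX
 * and TX) the loop assigns slots 0-31 to the control pair, 32-159 to the
 * bulk pair, and 160-191 to the interrupt pair.  This is purely
 * illustrative arithmetic, not an extra hardware constraint.
 */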

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
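
/*
 * Worked example of the splitting logic above (illustrative): a 5000-byte
 * TX request becomes three BDs of 2048 + 2048 + 904 bytes, with SOP set on
 * the first and EOP on the last.  The same request on RX with
 * irq_coalesce=0 is fed to the hardware one max_pkt-sized BD at a time,
 * with iudma_write() re-invoked after each BD completes.
 */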

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
			      ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

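	/* BIT(BCM63XX_NUM_IUDMA) - 1 == 0x3f: unmask the IRQ for channels 0-5 */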
	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			   GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_name[i];
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct list_head *pos, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_safe(pos, n, &bep->queue) {
			struct bcm63xx_req *breq =
				list_entry(pos, struct bcm63xx_req, queue);

			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			breq->req.complete(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
	gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
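
/*
 * For reference, gadget drivers reach the function above through the
 * generic usb_ep_queue() wrapper -- a hypothetical caller, not part of
 * this driver:
 *
 *   req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *   req->buf = buf;
 *   req->length = len;
 *   req->complete = my_complete_fn;
 *   rc = usb_ep_queue(ep, req, GFP_ATOMIC);
 */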

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration.  If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker.  This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host.  This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do.  Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}
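
/*
 * Illustrative trace of a successful GET_DESCRIPTOR (control-IN) transfer
 * through the state machine above, assuming no reset/shutdown:
 *
 *   1. EP0_REQUEUE: queue a 64-byte RX BD to catch the SETUP packet
 *   2. EP0_IDLE: SETUP arrives; ->setup() runs; wLength != 0 and
 *      bRequestType has USB_DIR_IN, so -> EP0_IN_DATA_PHASE_SETUP
 *   3. EP0_IN_DATA_PHASE_SETUP: the gadget driver's reply lands in
 *      ep0_reply via bcm63xx_udc_queue(); map and send it
 *   4. EP0_IN_DATA_PHASE_COMPLETE: TX finishes -> back to EP0_REQUEUE
 */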
1731
1732/***********************************************************************
1733 * Standard UDC gadget operations
1734 ***********************************************************************/
1735
1736/**
1737 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1738 * @gadget: USB slave device.
1739 */
1740static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1741{
1742 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1743
1744 return (usbd_readl(udc, USBD_STATUS_REG) &
1745 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1746}
1747
1748/**
1749 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1750 * @gadget: USB slave device.
1751 * @is_on: 0 to disable pullup, 1 to enable.
1752 *
1753 * See notes in bcm63xx_select_pullup().
1754 */
1755static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1756{
1757 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1758 unsigned long flags;
1759 int i, rc = -EINVAL;
1760
1761 spin_lock_irqsave(&udc->lock, flags);
1762 if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1763 udc->gadget.speed = USB_SPEED_UNKNOWN;
1764 udc->ep0state = EP0_REQUEUE;
1765 bcm63xx_fifo_setup(udc);
1766 bcm63xx_fifo_reset(udc);
1767 bcm63xx_ep_setup(udc);
1768
1769 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1770 for (i = 0; i < BCM63XX_NUM_EP; i++)
1771 bcm63xx_set_stall(udc, &udc->bep[i], false);
1772
1773 bcm63xx_set_ctrl_irqs(udc, true);
1774 bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1775 rc = 0;
1776 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1777 bcm63xx_select_pullup(udc, false);
1778
1779 udc->ep0_req_shutdown = 1;
1780 spin_unlock_irqrestore(&udc->lock, flags);
1781
1782 while (1) {
1783 schedule_work(&udc->ep0_wq);
1784 if (udc->ep0state == EP0_SHUTDOWN)
1785 break;
1786 msleep(50);
1787 }
1788 bcm63xx_set_ctrl_irqs(udc, false);
1789 cancel_work_sync(&udc->ep0_wq);
1790 return 0;
1791 }
1792
1793 spin_unlock_irqrestore(&udc->lock, flags);
1794 return rc;
1795}
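
/*
 * Usage sketch (illustrative): the gadget core drives this op through
 * usb_gadget_connect()/usb_gadget_disconnect(), so a soft connect or
 * disconnect requested by a function driver ends up here.
 */
static inline int example_soft_connect(struct usb_gadget *gadget, bool on)
{
	return on ? usb_gadget_connect(gadget) : usb_gadget_disconnect(gadget);
}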
1796
1797/**
1798 * bcm63xx_udc_start - Start the controller.
1799 * @gadget: USB slave device.
1800 * @driver: Driver for USB slave devices.
1801 */
1802static int bcm63xx_udc_start(struct usb_gadget *gadget,
1803 struct usb_gadget_driver *driver)
1804{
1805 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1806 unsigned long flags;
1807
1808 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1809 !driver->setup)
1810 return -EINVAL;
1811 if (!udc)
1812 return -ENODEV;
1813 if (udc->driver)
1814 return -EBUSY;
1815
1816 spin_lock_irqsave(&udc->lock, flags);
1817
1818 set_clocks(udc, true);
1819 bcm63xx_fifo_setup(udc);
1820 bcm63xx_ep_init(udc);
1821 bcm63xx_ep_setup(udc);
1822 bcm63xx_fifo_reset(udc);
1823 bcm63xx_select_phy_mode(udc, true);
1824
1825 udc->driver = driver;
1826 driver->driver.bus = NULL;
1827 udc->gadget.dev.of_node = udc->dev->of_node;
1828
1829 spin_unlock_irqrestore(&udc->lock, flags);
1830
1831 return 0;
1832}
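
/*
 * Context sketch (illustrative): ->udc_start is never called directly.
 * A function driver registers itself with the gadget core, which then
 * binds it to this UDC and invokes bcm63xx_udc_start().
 */
static inline int example_bind_function_driver(struct usb_gadget_driver *drv)
{
	return usb_gadget_probe_driver(drv);	/* core calls ->udc_start */
}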
1833
1834/**
1835 * bcm63xx_udc_stop - Shut down the controller.
1836 * @gadget: USB slave device.
1837 * @driver: Driver for USB slave devices.
1838 */
1839static int bcm63xx_udc_stop(struct usb_gadget *gadget,
1840 struct usb_gadget_driver *driver)
1841{
1842 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1843 unsigned long flags;
1844
1845 spin_lock_irqsave(&udc->lock, flags);
1846 udc->driver = NULL;
1847 spin_unlock_irqrestore(&udc->lock, flags);
1848
1849 /*
1850 * If we switch the PHY too abruptly after dropping D+, the host
1851 * will often complain:
1852 *
1853 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1854 *
1855 * msleep() must not be called with a spinlock held and IRQs off,
1856 * so drop udc->lock before the delay and the final shutdown steps.
1857 */
1858 msleep(100);
1859 bcm63xx_select_phy_mode(udc, false);
1860 set_clocks(udc, false);
1861
1862 return 0;
1863}
1864
1865static const struct usb_gadget_ops bcm63xx_udc_ops = {
1866 .get_frame = bcm63xx_udc_get_frame,
1867 .pullup = bcm63xx_udc_pullup,
1868 .udc_start = bcm63xx_udc_start,
1869 .udc_stop = bcm63xx_udc_stop,
1870};
1871
1872/***********************************************************************
1873 * IRQ handling
1874 ***********************************************************************/
1875
1876/**
1877 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1878 * @udc: Reference to the device controller.
1879 *
1880 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1881 * The driver never sees the raw control packets coming in on the ep0
1882 * IUDMA channel, but at least we get an interrupt event to tell us that
1883 * new values are waiting in the USBD_STATUS register.
1884 */
1885static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1886{
1887 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1888
1889 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1890 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1891 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1892 USBD_STATUS_ALTINTF_SHIFT;
1893 bcm63xx_ep_setup(udc);
1894}
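
/*
 * Illustrative helper, not used by the driver: the three fields above
 * are extracted with the same mask/shift idiom, which could be written
 * once as:
 */
static inline u32 usbd_status_field(u32 reg, u32 mask, int shift)
{
	return (reg & mask) >> shift;
}
/* e.g. usbd_status_field(reg, USBD_STATUS_CFG_MASK, USBD_STATUS_CFG_SHIFT) */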
1895
1896/**
1897 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1898 * @udc: Reference to the device controller.
1899 *
1900 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
1901 * speed has changed, so that the caller can update the endpoint settings.
1902 */
1903static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1904{
1905 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1906 enum usb_device_speed oldspeed = udc->gadget.speed;
1907
1908 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1909 case BCM63XX_SPD_HIGH:
1910 udc->gadget.speed = USB_SPEED_HIGH;
1911 break;
1912 case BCM63XX_SPD_FULL:
1913 udc->gadget.speed = USB_SPEED_FULL;
1914 break;
1915 default:
1916 /* this should never happen */
1917 udc->gadget.speed = USB_SPEED_UNKNOWN;
1918 dev_err(udc->dev,
1919 "received SETUP packet with invalid link speed\n");
1920 return 0;
1921 }
1922
1923 if (udc->gadget.speed != oldspeed) {
1924 dev_info(udc->dev, "link up, %s-speed mode\n",
1925 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1926 return 1;
1927 } else {
1928 return 0;
1929 }
1930}
1931
1932/**
1933 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1934 * @udc: Reference to the device controller.
1935 * @new_status: true to "refresh" wedge status; false to clear it.
1936 *
1937 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1938 * because the controller hardware is designed to automatically clear
1939 * stalls in response to a CLEAR_FEATURE request from the host.
1940 *
1941 * On a RESET interrupt, we clear every wedge, since a bus reset un-stalls all endpoints.
1942 */
1943static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1944{
1945 int i;
1946
1947 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1948 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1949 if (!new_status)
1950 clear_bit(i, &udc->wedgemap);
1951 }
1952}
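
/*
 * For context (illustrative): an endpoint becomes "wedged" when a
 * function driver calls the generic API below; this driver's set_wedge
 * endpoint op (implemented earlier in this file) sets the bit in
 * udc->wedgemap.  The SETUP-time refresh above keeps that wedge
 * asserted even though the hardware auto-clears the stall.
 */
static inline int example_wedge_ep(struct usb_ep *ep)
{
	return usb_ep_set_wedge(ep);	/* halted until the driver clears it */
}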
1953
1954/**
1955 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1956 * @irq: IRQ number (unused).
1957 * @dev_id: Reference to the device controller.
1958 *
1959 * This is where we handle link (VBUS) down, USB reset, speed changes,
1960 * SET_CONFIGURATION, and SET_INTERFACE events.
1961 */
1962static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1963{
1964 struct bcm63xx_udc *udc = dev_id;
1965 u32 stat;
1966 bool disconnected = false;
1967
1968 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1969 usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1970
1971 usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1972
1973 spin_lock(&udc->lock);
1974 if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1975 /* VBUS toggled */
1976
1977 if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1978 USBD_EVENTS_USB_LINK_MASK) &&
1979 udc->gadget.speed != USB_SPEED_UNKNOWN)
1980 dev_info(udc->dev, "link down\n");
1981
1982 udc->gadget.speed = USB_SPEED_UNKNOWN;
1983 disconnected = true;
1984 }
1985 if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1986 bcm63xx_fifo_setup(udc);
1987 bcm63xx_fifo_reset(udc);
1988 bcm63xx_ep_setup(udc);
1989
1990 bcm63xx_update_wedge(udc, false);
1991
1992 udc->ep0_req_reset = 1;
1993 schedule_work(&udc->ep0_wq);
1994 disconnected = true;
1995 }
1996 if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
1997 if (bcm63xx_update_link_speed(udc)) {
1998 bcm63xx_fifo_setup(udc);
1999 bcm63xx_ep_setup(udc);
2000 }
2001 bcm63xx_update_wedge(udc, true);
2002 }
2003 if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2004 bcm63xx_update_cfg_iface(udc);
2005 udc->ep0_req_set_cfg = 1;
2006 schedule_work(&udc->ep0_wq);
2007 }
2008 if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2009 bcm63xx_update_cfg_iface(udc);
2010 udc->ep0_req_set_iface = 1;
2011 schedule_work(&udc->ep0_wq);
2012 }
2013 spin_unlock(&udc->lock);
2014
2015 if (disconnected && udc->driver)
2016 udc->driver->disconnect(&udc->gadget);
2017
2018 return IRQ_HANDLED;
2019}
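
/*
 * Illustrative sketch of the sample-and-ack idiom used above, assuming
 * the event status register is write-1-to-clear as the handler's usage
 * implies: snapshot only the enabled event bits, acknowledge them, then
 * act on the snapshot.
 */
static inline u32 example_sample_and_ack(struct bcm63xx_udc *udc)
{
	u32 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
		   usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);	/* ack */
	return stat;
}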
2020
2021/**
2022 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2023 * @irq: IRQ number (unused).
2024 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2025 *
2026 * For the two ep0 channels, we have special handling that triggers the
2027 * ep0 worker thread. For normal bulk/intr channels, either queue up
2028 * the next buffer descriptor for the transaction (transfer still in
2029 * progress), or invoke the completion callback (transfer complete).
2030 */
2031static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2032{
2033 struct iudma_ch *iudma = dev_id;
2034 struct bcm63xx_udc *udc = iudma->udc;
2035 struct bcm63xx_ep *bep;
2036 struct usb_request *req = NULL;
2037 struct bcm63xx_req *breq = NULL;
2038 int rc;
2039 bool is_done = false;
2040
2041 spin_lock(&udc->lock);
2042
2043 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2044 ENETDMAC_IR_REG, iudma->ch_idx);
2045 bep = iudma->bep;
2046 rc = iudma_read(udc, iudma);
2047
2048 /* special handling for EP0 RX (0) and TX (1) */
2049 if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2050 iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2051 req = udc->ep0_request;
2052 breq = our_req(req);
2053
2054 /* a single request could require multiple submissions */
2055 if (rc >= 0) {
2056 req->actual += rc;
2057
2058 if (req->actual >= req->length || breq->bd_bytes > rc) {
2059 udc->ep0_req_completed = 1;
2060 is_done = true;
2061 schedule_work(&udc->ep0_wq);
2062
2063 /* "actual" on a ZLP is 1 byte */
2064 req->actual = min(req->actual, req->length);
2065 } else {
2066 /* queue up the next BD (same request) */
2067 iudma_write(udc, iudma, breq);
2068 }
2069 }
2070 } else if (!list_empty(&bep->queue)) {
2071 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2072 req = &breq->req;
2073
2074 if (rc >= 0) {
2075 req->actual += rc;
2076
2077 if (req->actual >= req->length || breq->bd_bytes > rc) {
2078 is_done = true;
2079 list_del(&breq->queue);
2080
2081 req->actual = min(req->actual, req->length);
2082
2083 if (!list_empty(&bep->queue)) {
2084 struct bcm63xx_req *next;
2085
2086 next = list_first_entry(&bep->queue,
2087 struct bcm63xx_req, queue);
2088 iudma_write(udc, iudma, next);
2089 }
2090 } else {
2091 iudma_write(udc, iudma, breq);
2092 }
2093 }
2094 }
2095 spin_unlock(&udc->lock);
2096
2097 if (is_done) {
2098 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2099 if (req->complete)
2100 req->complete(&bep->ep, req);
2101 }
2102
2103 return IRQ_HANDLED;
2104}
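
/*
 * Illustrative helper, not used by the driver: the completion test from
 * the ISR, written out.  A request finishes when every byte has been
 * transferred, or when the hardware returned fewer bytes than the last
 * buffer descriptor asked for (a short packet ended the transfer).
 * rc is assumed >= 0, as at both call sites above.
 */
static inline bool example_req_done(const struct usb_request *req,
				    const struct bcm63xx_req *breq, int rc)
{
	return req->actual >= req->length || breq->bd_bytes > rc;
}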
2105
2106/***********************************************************************
2107 * Debug filesystem
2108 ***********************************************************************/
2109
2110/*
2111 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2112 * @s: seq_file to which the information will be written.
2113 * @p: Unused.
2114 *
2115 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2116 */
2117static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2118{
2119 struct bcm63xx_udc *udc = s->private;
2120
2121 if (!udc->driver)
2122 return -ENODEV;
2123
2124 seq_printf(s, "ep0 state: %s\n",
2125 bcm63xx_ep0_state_names[udc->ep0state]);
2126 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2127 udc->ep0_req_reset ? "reset " : "",
2128 udc->ep0_req_set_cfg ? "set_cfg " : "",
2129 udc->ep0_req_set_iface ? "set_iface " : "",
2130 udc->ep0_req_shutdown ? "shutdown " : "",
2131 udc->ep0_request ? "pending " : "",
2132 udc->ep0_req_completed ? "completed " : "",
2133 udc->ep0_reply ? "reply " : "");
2134 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2135 udc->cfg, udc->iface, udc->alt_iface);
2136 seq_printf(s, "regs:\n");
2137 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2138 usbd_readl(udc, USBD_CONTROL_REG),
2139 usbd_readl(udc, USBD_STRAPS_REG),
2140 usbd_readl(udc, USBD_STATUS_REG));
2141 seq_printf(s, " events: %08x; stall: %08x\n",
2142 usbd_readl(udc, USBD_EVENTS_REG),
2143 usbd_readl(udc, USBD_STALL_REG));
2144
2145 return 0;
2146}
2147
2148/*
2149 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2150 * @s: seq_file to which the information will be written.
2151 * @p: Unused.
2152 *
2153 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2154 */
2155static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2156{
2157 struct bcm63xx_udc *udc = s->private;
2158 int ch_idx, i;
2159 u32 sram2, sram3;
2160
2161 if (!udc->driver)
2162 return -ENODEV;
2163
2164 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2165 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2166 struct list_head *pos;
2167
2168 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2169 switch (iudma_defaults[ch_idx].ep_type) {
2170 case BCMEP_CTRL:
2171 seq_printf(s, "control");
2172 break;
2173 case BCMEP_BULK:
2174 seq_printf(s, "bulk");
2175 break;
2176 case BCMEP_INTR:
2177 seq_printf(s, "interrupt");
2178 break;
2179 }
2180 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2181 seq_printf(s, " [ep%d]:\n",
2182 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2183 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2184 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2185 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2186 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2187 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2188
2189 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2190 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2191 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2192 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2193 sram2 >> 16, sram2 & 0xffff,
2194 sram3 >> 16, sram3 & 0xffff,
2195 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2196 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2197 iudma->n_bds);
2198
2199 if (iudma->bep) {
2200 i = 0;
2201 list_for_each(pos, &iudma->bep->queue)
2202 i++;
2203 seq_printf(s, "; %d queued\n", i);
2204 } else {
2205 seq_printf(s, "\n");
2206 }
2207
2208 for (i = 0; i < iudma->n_bds; i++) {
2209 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2210
2211 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2212 i * sizeof(*d), i,
2213 d->len_stat >> 16, d->len_stat & 0xffff,
2214 d->address);
2215 if (d == iudma->read_bd)
2216 seq_printf(s, " <<RD");
2217 if (d == iudma->write_bd)
2218 seq_printf(s, " <<WR");
2219 seq_printf(s, "\n");
2220 }
2221
2222 seq_printf(s, "\n");
2223 }
2224
2225 return 0;
2226}
2227
2228static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2229{
2230 return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2231}
2232
2233static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2234{
2235 return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2236}
2237
2238static const struct file_operations usbd_dbg_fops = {
2239 .owner = THIS_MODULE,
2240 .open = bcm63xx_usbd_dbg_open,
2241 .llseek = seq_lseek,
2242 .read = seq_read,
2243 .release = single_release,
2244};
2245
2246static const struct file_operations iudma_dbg_fops = {
2247 .owner = THIS_MODULE,
2248 .open = bcm63xx_iudma_dbg_open,
2249 .llseek = seq_lseek,
2250 .read = seq_read,
2251 .release = single_release,
2252};
2253
2254
2255/**
2256 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2257 * @udc: Reference to the device controller.
2258 */
2259static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2260{
2261 struct dentry *root, *usbd, *iudma;
2262
2263 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2264 return;
2265
2266 root = debugfs_create_dir(udc->gadget.name, NULL);
2267 if (IS_ERR(root) || !root)
2268 goto err_root;
2269
2270 usbd = debugfs_create_file("usbd", 0400, root, udc,
2271 &usbd_dbg_fops);
2272 if (!usbd)
2273 goto err_usbd;
2274 iudma = debugfs_create_file("iudma", 0400, root, udc,
2275 &iudma_dbg_fops);
2276 if (!iudma)
2277 goto err_iudma;
2278
2279 udc->debugfs_root = root;
2280 udc->debugfs_usbd = usbd;
2281 udc->debugfs_iudma = iudma;
2282 return;
2283err_iudma:
2284 debugfs_remove(usbd);
2285err_usbd:
2286 debugfs_remove(root);
2287err_root:
2288 dev_err(udc->dev, "debugfs is not available\n");
2289}
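
/*
 * Usage note (illustrative): with CONFIG_USB_GADGET_DEBUG_FS enabled
 * and debugfs mounted at /sys/kernel/debug, the dumps can be read
 * directly, e.g.:
 *
 *	cat /sys/kernel/debug/bcm63xx_udc/usbd
 *	cat /sys/kernel/debug/bcm63xx_udc/iudma
 *
 * The directory is named after udc->gadget.name (the platform device
 * name), so the exact path may differ from board to board.
 */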
2290
2291/**
2292 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2293 * @udc: Reference to the device controller.
2294 *
2295 * debugfs_remove() is safe to call with a NULL argument.
2296 */
2297static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2298{
2299 debugfs_remove(udc->debugfs_iudma);
2300 debugfs_remove(udc->debugfs_usbd);
2301 debugfs_remove(udc->debugfs_root);
2302 udc->debugfs_iudma = NULL;
2303 udc->debugfs_usbd = NULL;
2304 udc->debugfs_root = NULL;
2305}
2306
2307/***********************************************************************
2308 * Driver init/exit
2309 ***********************************************************************/
2310
2311/**
2312 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2313 * @pdev: Platform device struct from the bcm63xx BSP code.
2314 *
2315 * Note that platform data is required, because pd.port_no varies from chip
2316 * to chip and is used to switch the correct USB port to device mode.
2317 */
2318static int bcm63xx_udc_probe(struct platform_device *pdev)
2319{
2320 struct device *dev = &pdev->dev;
2321 struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2322 struct bcm63xx_udc *udc;
2323 struct resource *res;
2324 int rc = -ENOMEM, i, irq;
2325
2326 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2327 if (!udc) {
2328 dev_err(dev, "cannot allocate memory\n");
2329 return -ENOMEM;
2330 }
2331
2332 platform_set_drvdata(pdev, udc);
2333 udc->dev = dev;
2334 udc->pd = pd;
2335
2336 if (!pd) {
2337 dev_err(dev, "missing platform data\n");
2338 return -EINVAL;
2339 }
2340
2341 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2342 udc->usbd_regs = devm_ioremap_resource(dev, res);
2343 if (IS_ERR(udc->usbd_regs))
2344 return PTR_ERR(udc->usbd_regs);
2345
2346 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2347 udc->iudma_regs = devm_ioremap_resource(dev, res);
2348 if (IS_ERR(udc->iudma_regs))
2349 return PTR_ERR(udc->iudma_regs);
2350
2351 spin_lock_init(&udc->lock);
2352 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2353
2354 udc->gadget.ops = &bcm63xx_udc_ops;
2355 udc->gadget.name = dev_name(dev);
2356
2357 if (!pd->use_fullspeed && !use_fullspeed)
2358 udc->gadget.max_speed = USB_SPEED_HIGH;
2359 else
2360 udc->gadget.max_speed = USB_SPEED_FULL;
2361
2362 /* request clocks, allocate buffers, and clear any pending IRQs */
2363 rc = bcm63xx_init_udc_hw(udc);
2364 if (rc)
2365 return rc;
2366
2367 rc = -ENXIO;
2368
2369 /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2370 irq = platform_get_irq(pdev, 0);
2371 if (irq < 0) {
2372 dev_err(dev, "missing IRQ resource #0\n");
2373 goto out_uninit;
2374 }
2375 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2376 dev_name(dev), udc) < 0) {
2377 dev_err(dev, "error requesting IRQ #%d\n", irq);
2378 goto out_uninit;
2379 }
2380
2381 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2382 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2383 irq = platform_get_irq(pdev, i + 1);
2384 if (irq < 0) {
2385 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2386 goto out_uninit;
2387 }
2388 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2389 dev_name(dev), &udc->iudma[i]) < 0) {
2390 dev_err(dev, "error requesting IRQ #%d\n", irq);
2391 goto out_uninit;
2392 }
2393 }
2394
2395 bcm63xx_udc_init_debugfs(udc);
2396 rc = usb_add_gadget_udc(dev, &udc->gadget);
2397 if (!rc)
2398 return 0;
2399
2400 bcm63xx_udc_cleanup_debugfs(udc);
2401out_uninit:
2402 bcm63xx_uninit_udc_hw(udc);
2403 return rc;
2404}
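
/*
 * Illustrative sketch -- the field values, and the exact layout of the
 * platform data, are assumptions based on the fields this file uses
 * (pd.port_no is described above; pd->use_fullspeed is read in probe).
 * The BSP would register the device with something along these lines:
 */
static struct bcm63xx_usbd_platform_data example_usbd_pd = {
	.port_no	= 0,	/* which USB port to switch to device mode */
	.use_fullspeed	= 0,	/* 0 = allow high speed operation */
};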
2405
2406/**
2407 * bcm63xx_udc_remove - Remove the device from the system.
2408 * @pdev: Platform device struct from the bcm63xx BSP code.
2409 */
2410static int bcm63xx_udc_remove(struct platform_device *pdev)
2411{
2412 struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2413
2414 bcm63xx_udc_cleanup_debugfs(udc);
2415 usb_del_gadget_udc(&udc->gadget);
2416 BUG_ON(udc->driver);
2417
2418 bcm63xx_uninit_udc_hw(udc);
2419
2420 return 0;
2421}
2422
2423static struct platform_driver bcm63xx_udc_driver = {
2424 .probe = bcm63xx_udc_probe,
2425 .remove = bcm63xx_udc_remove,
2426 .driver = {
2427 .name = DRV_MODULE_NAME,
2428 .owner = THIS_MODULE,
2429 },
2430};
2431module_platform_driver(bcm63xx_udc_driver);
2432
2433MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2434MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2435MODULE_LICENSE("GPL");
2436MODULE_ALIAS("platform:" DRV_MODULE_NAME);