/*
 * USB Gadget driver for LPC32xx
 *
 * Authors:
 *    Kevin Wells <kevin.wells@nxp.com>
 *    Mike James
 *    Roland Stigge <stigge@antcom.de>
 *
 * Copyright (C) 2006 Philips Semiconductors
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2012 Roland Stigge
 *
 * Note: This driver is based on original work done by Mike James for
 *       the LPC3180.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
30
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/i2c.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/usb/isp1301.h>

#include <asm/byteorder.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/system.h>

#include <mach/platform.h>
#include <mach/irqs.h>
#include <mach/board.h>
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
67
68/*
69 * USB device configuration structure
70 */
71typedef void (*usc_chg_event)(int);
72struct lpc32xx_usbd_cfg {
73 int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */
74 usc_chg_event conn_chgb; /* Connection change event (optional) */
75 usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
76 usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
77};
78
79/*
80 * controller driver data structures
81 */
82
83/* 16 endpoints (not to be confused with 32 hardware endpoints) */
84#define NUM_ENDPOINTS 16
85
86/*
87 * IRQ indices make reading the code a little easier
88 */
89#define IRQ_USB_LP 0
90#define IRQ_USB_HP 1
91#define IRQ_USB_DEVDMA 2
92#define IRQ_USB_ATX 3
93
94#define EP_OUT 0 /* RX (from host) */
95#define EP_IN 1 /* TX (to host) */
96
97/* Returns the interrupt mask for the selected hardware endpoint */
98#define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
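
/*
 * Note: each of the 16 logical endpoints owns an OUT/IN pair of hardware
 * endpoints, which is where the 32 hardware endpoints mentioned above come
 * from. For example, with this macro:
 *   EP_MASK_SEL(0, EP_OUT) == (1 << 0)   - hardware EP 0 (logical EP0, OUT)
 *   EP_MASK_SEL(0, EP_IN)  == (1 << 1)   - hardware EP 1 (logical EP0, IN)
 *   EP_MASK_SEL(1, EP_IN)  == (1 << 3)   - hardware EP 3 (logical EP1, IN)
 */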
99
100#define EP_INT_TYPE 0
101#define EP_ISO_TYPE 1
102#define EP_BLK_TYPE 2
103#define EP_CTL_TYPE 3
104
105/* EP0 states */
106#define WAIT_FOR_SETUP 0 /* Wait for setup packet */
107#define DATA_IN 1 /* Expect dev->host transfer */
108#define DATA_OUT 2 /* Expect host->dev transfer */
109
/* DD (DMA Descriptor) structure, requires word alignment. This is already
 * defined in the LPC32XX USB device header file, but this version is slightly
 * modified to tag some work data with each DMA descriptor. */
113struct lpc32xx_usbd_dd_gad {
114 u32 dd_next_phy;
115 u32 dd_setup;
116 u32 dd_buffer_addr;
117 u32 dd_status;
118 u32 dd_iso_ps_mem_addr;
119 u32 this_dma;
120 u32 iso_status[6]; /* 5 spare */
121 u32 dd_next_v;
122};
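
/*
 * Note: only the first five words (dd_next_phy through dd_iso_ps_mem_addr)
 * match the hardware DD layout that the DMA engine walks; this_dma,
 * iso_status[] and dd_next_v are driver bookkeeping kept in the same
 * dma_pool allocation. iso_status[] starts 24 bytes into the structure,
 * which is why dd_iso_ps_mem_addr is later pointed at this_dma + 24 for
 * ISO endpoints.
 */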
123
124/*
125 * Logical endpoint structure
126 */
127struct lpc32xx_ep {
128 struct usb_ep ep;
129 struct list_head queue;
130 struct lpc32xx_udc *udc;
131
132 u32 hwep_num_base; /* Physical hardware EP */
133 u32 hwep_num; /* Maps to hardware endpoint */
134 u32 maxpacket;
135 u32 lep;
136
137 bool is_in;
138 bool req_pending;
139 u32 eptype;
140
141 u32 totalints;
142
143 bool wedge;
144
145 const struct usb_endpoint_descriptor *desc;
146};
147
148/*
149 * Common UDC structure
150 */
151struct lpc32xx_udc {
152 struct usb_gadget gadget;
153 struct usb_gadget_driver *driver;
154 struct platform_device *pdev;
155 struct device *dev;
156 struct dentry *pde;
157 spinlock_t lock;
158 struct i2c_client *isp1301_i2c_client;
159
160 /* Board and device specific */
161 struct lpc32xx_usbd_cfg *board;
162 u32 io_p_start;
163 u32 io_p_size;
164 void __iomem *udp_baseaddr;
165 int udp_irq[4];
166 struct clk *usb_pll_clk;
167 struct clk *usb_slv_clk;
	struct clk *usb_otg_clk;

170 /* DMA support */
171 u32 *udca_v_base;
172 u32 udca_p_base;
173 struct dma_pool *dd_cache;
174
175 /* Common EP and control data */
176 u32 enabled_devints;
177 u32 enabled_hwepints;
178 u32 dev_status;
179 u32 realized_eps;
180
181 /* VBUS detection, pullup, and power flags */
182 u8 vbus;
183 u8 last_vbus;
184 int pullup;
185 int poweron;
186
187 /* Work queues related to I2C support */
188 struct work_struct pullup_job;
189 struct work_struct vbus_job;
190 struct work_struct power_job;
191
192 /* USB device peripheral - various */
193 struct lpc32xx_ep ep[NUM_ENDPOINTS];
194 bool enabled;
195 bool clocked;
196 bool suspended;
197 bool selfpowered;
198 int ep0state;
199 atomic_t enabled_ep_cnt;
200 wait_queue_head_t ep_disable_wait_queue;
201};
202
203/*
204 * Endpoint request
205 */
206struct lpc32xx_request {
207 struct usb_request req;
208 struct list_head queue;
209 struct lpc32xx_usbd_dd_gad *dd_desc_ptr;
210 bool mapped;
211 bool send_zlp;
212};
213
214static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
215{
216 return container_of(g, struct lpc32xx_udc, gadget);
217}
218
219#define ep_dbg(epp, fmt, arg...) \
220 dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
221#define ep_err(epp, fmt, arg...) \
222 dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
223#define ep_info(epp, fmt, arg...) \
224 dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
225#define ep_warn(epp, fmt, arg...) \
	dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)
227
228#define UDCA_BUFF_SIZE (128)
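
/* The UDCA is an array of 32 word-sized DD pointers, one per hardware
 * endpoint, so UDCA_BUFF_SIZE (128 bytes) covers all of them. The driver
 * indexes it as udca_v_base[hwep] and hands the physical base address to the
 * controller through the USBD_UDCAH register defined below. */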
229
/* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
 * be replaced with an ioremap()ed pointer
 */
#define USB_CTRL	IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)

235/* USB_CTRL bit defines */
236#define USB_SLAVE_HCLK_EN (1 << 24)
237#define USB_HOST_NEED_CLK_EN (1 << 21)
238#define USB_DEV_NEED_CLK_EN (1 << 22)
239
/**********************************************************************
241 * USB device controller register offsets
242 **********************************************************************/
243
244#define USBD_DEVINTST(x) ((x) + 0x200)
245#define USBD_DEVINTEN(x) ((x) + 0x204)
246#define USBD_DEVINTCLR(x) ((x) + 0x208)
247#define USBD_DEVINTSET(x) ((x) + 0x20C)
248#define USBD_CMDCODE(x) ((x) + 0x210)
249#define USBD_CMDDATA(x) ((x) + 0x214)
250#define USBD_RXDATA(x) ((x) + 0x218)
251#define USBD_TXDATA(x) ((x) + 0x21C)
252#define USBD_RXPLEN(x) ((x) + 0x220)
253#define USBD_TXPLEN(x) ((x) + 0x224)
254#define USBD_CTRL(x) ((x) + 0x228)
255#define USBD_DEVINTPRI(x) ((x) + 0x22C)
256#define USBD_EPINTST(x) ((x) + 0x230)
257#define USBD_EPINTEN(x) ((x) + 0x234)
258#define USBD_EPINTCLR(x) ((x) + 0x238)
259#define USBD_EPINTSET(x) ((x) + 0x23C)
260#define USBD_EPINTPRI(x) ((x) + 0x240)
261#define USBD_REEP(x) ((x) + 0x244)
262#define USBD_EPIND(x) ((x) + 0x248)
263#define USBD_EPMAXPSIZE(x) ((x) + 0x24C)
264/* DMA support registers only below */
265/* Set, clear, or get enabled state of the DMA request status. If
266 * enabled, an IN or OUT token will start a DMA transfer for the EP */
267#define USBD_DMARST(x) ((x) + 0x250)
268#define USBD_DMARCLR(x) ((x) + 0x254)
269#define USBD_DMARSET(x) ((x) + 0x258)
270/* DMA UDCA head pointer */
271#define USBD_UDCAH(x) ((x) + 0x280)
/* EP DMA status, enable, and disable. This is used to specifically
 * enable or disable DMA for a specific EP */
274#define USBD_EPDMAST(x) ((x) + 0x284)
275#define USBD_EPDMAEN(x) ((x) + 0x288)
276#define USBD_EPDMADIS(x) ((x) + 0x28C)
277/* DMA master interrupts enable and pending interrupts */
278#define USBD_DMAINTST(x) ((x) + 0x290)
279#define USBD_DMAINTEN(x) ((x) + 0x294)
280/* DMA end of transfer interrupt enable, disable, status */
281#define USBD_EOTINTST(x) ((x) + 0x2A0)
282#define USBD_EOTINTCLR(x) ((x) + 0x2A4)
283#define USBD_EOTINTSET(x) ((x) + 0x2A8)
284/* New DD request interrupt enable, disable, status */
285#define USBD_NDDRTINTST(x) ((x) + 0x2AC)
286#define USBD_NDDRTINTCLR(x) ((x) + 0x2B0)
287#define USBD_NDDRTINTSET(x) ((x) + 0x2B4)
288/* DMA error interrupt enable, disable, status */
289#define USBD_SYSERRTINTST(x) ((x) + 0x2B8)
290#define USBD_SYSERRTINTCLR(x) ((x) + 0x2BC)
291#define USBD_SYSERRTINTSET(x) ((x) + 0x2C0)
292
293/**********************************************************************
294 * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
295 * USBD_DEVINTPRI register definitions
296 **********************************************************************/
297#define USBD_ERR_INT (1 << 9)
298#define USBD_EP_RLZED (1 << 8)
299#define USBD_TXENDPKT (1 << 7)
300#define USBD_RXENDPKT (1 << 6)
301#define USBD_CDFULL (1 << 5)
302#define USBD_CCEMPTY (1 << 4)
303#define USBD_DEV_STAT (1 << 3)
304#define USBD_EP_SLOW (1 << 2)
305#define USBD_EP_FAST (1 << 1)
306#define USBD_FRAME (1 << 0)
307
308/**********************************************************************
309 * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
310 * USBD_EPINTPRI register definitions
311 **********************************************************************/
312/* End point selection macro (RX) */
313#define USBD_RX_EP_SEL(e) (1 << ((e) << 1))
314
315/* End point selection macro (TX) */
316#define USBD_TX_EP_SEL(e) (1 << (((e) << 1) + 1))
317
318/**********************************************************************
319 * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
320 * USBD_EPDMAEN/USBD_EPDMADIS/
321 * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
322 * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
323 * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
324 * register definitions
325 **********************************************************************/
326/* Endpoint selection macro */
327#define USBD_EP_SEL(e) (1 << (e))
328
329/**********************************************************************
 * USBD_DMAINTST/USBD_DMAINTEN
331 **********************************************************************/
332#define USBD_SYS_ERR_INT (1 << 2)
333#define USBD_NEW_DD_INT (1 << 1)
334#define USBD_EOT_INT (1 << 0)
335
336/**********************************************************************
337 * USBD_RXPLEN register definitions
338 **********************************************************************/
339#define USBD_PKT_RDY (1 << 11)
340#define USBD_DV (1 << 10)
341#define USBD_PK_LEN_MASK 0x3FF
342
343/**********************************************************************
344 * USBD_CTRL register definitions
345 **********************************************************************/
346#define USBD_LOG_ENDPOINT(e) ((e) << 2)
347#define USBD_WR_EN (1 << 1)
348#define USBD_RD_EN (1 << 0)
349
350/**********************************************************************
351 * USBD_CMDCODE register definitions
352 **********************************************************************/
353#define USBD_CMD_CODE(c) ((c) << 16)
354#define USBD_CMD_PHASE(p) ((p) << 8)
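
/* A protocol engine command word is simply USBD_CMD_CODE() ORed with
 * USBD_CMD_PHASE(); for example, CMD_SET_ADDR defined further below is
 * USBD_CMD_CODE(0xD0) | USBD_CMD_PHASE(0x05) == 0x00D00500. Phase 0x05
 * issues a command, phase 0x02 reads data back (the DAT_* defines) and
 * phase 0x01 writes a data byte (DAT_WR_BYTE). */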
355
356/**********************************************************************
357 * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
358 **********************************************************************/
359#define USBD_DMAEP(e) (1 << (e))
360
361/* DD (DMA Descriptor) structure, requires word alignment */
362struct lpc32xx_usbd_dd {
363 u32 *dd_next;
364 u32 dd_setup;
365 u32 dd_buffer_addr;
366 u32 dd_status;
367 u32 dd_iso_ps_mem_addr;
368};
369
370/* dd_setup bit defines */
371#define DD_SETUP_ATLE_DMA_MODE 0x01
372#define DD_SETUP_NEXT_DD_VALID 0x04
373#define DD_SETUP_ISO_EP 0x10
374#define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5)
375#define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16)
376
377/* dd_status bit defines */
378#define DD_STATUS_DD_RETIRED 0x01
379#define DD_STATUS_STS_MASK 0x1E
380#define DD_STATUS_STS_NS 0x00 /* Not serviced */
381#define DD_STATUS_STS_BS 0x02 /* Being serviced */
382#define DD_STATUS_STS_NC 0x04 /* Normal completion */
383#define DD_STATUS_STS_DUR 0x06 /* Data underrun (short packet) */
384#define DD_STATUS_STS_DOR 0x08 /* Data overrun */
385#define DD_STATUS_STS_SE 0x12 /* System error */
386#define DD_STATUS_PKT_VAL 0x20 /* Packet valid */
387#define DD_STATUS_LSB_EX 0x40 /* LS byte extracted (ATLE) */
388#define DD_STATUS_MSB_EX 0x80 /* MS byte extracted (ATLE) */
389#define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F)
390#define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF)
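
/* As a sketch of how these fields are used (see lpc32xx_ep_queue() below),
 * a 512-byte request on a 64-byte bulk endpoint is described to the DMA
 * engine with:
 *   dd->dd_setup = DD_SETUP_PACKETLEN(64) | DD_SETUP_DMALENBYTES(512);
 * and the completion result can later be decoded from dd_status with
 * DD_STATUS_STS_MASK and DD_STATUS_CURDMACNT(). */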
391
392/*
393 *
394 * Protocol engine bits below
395 *
396 */
397/* Device Interrupt Bit Definitions */
398#define FRAME_INT 0x00000001
399#define EP_FAST_INT 0x00000002
400#define EP_SLOW_INT 0x00000004
401#define DEV_STAT_INT 0x00000008
402#define CCEMTY_INT 0x00000010
403#define CDFULL_INT 0x00000020
404#define RxENDPKT_INT 0x00000040
405#define TxENDPKT_INT 0x00000080
406#define EP_RLZED_INT 0x00000100
407#define ERR_INT 0x00000200
408
409/* Rx & Tx Packet Length Definitions */
410#define PKT_LNGTH_MASK 0x000003FF
411#define PKT_DV 0x00000400
412#define PKT_RDY 0x00000800
413
414/* USB Control Definitions */
415#define CTRL_RD_EN 0x00000001
416#define CTRL_WR_EN 0x00000002
417
418/* Command Codes */
419#define CMD_SET_ADDR 0x00D00500
420#define CMD_CFG_DEV 0x00D80500
421#define CMD_SET_MODE 0x00F30500
422#define CMD_RD_FRAME 0x00F50500
423#define DAT_RD_FRAME 0x00F50200
424#define CMD_RD_TEST 0x00FD0500
425#define DAT_RD_TEST 0x00FD0200
426#define CMD_SET_DEV_STAT 0x00FE0500
427#define CMD_GET_DEV_STAT 0x00FE0500
428#define DAT_GET_DEV_STAT 0x00FE0200
429#define CMD_GET_ERR_CODE 0x00FF0500
430#define DAT_GET_ERR_CODE 0x00FF0200
431#define CMD_RD_ERR_STAT 0x00FB0500
432#define DAT_RD_ERR_STAT 0x00FB0200
433#define DAT_WR_BYTE(x) (0x00000100 | ((x) << 16))
434#define CMD_SEL_EP(x) (0x00000500 | ((x) << 16))
435#define DAT_SEL_EP(x) (0x00000200 | ((x) << 16))
436#define CMD_SEL_EP_CLRI(x) (0x00400500 | ((x) << 16))
437#define DAT_SEL_EP_CLRI(x) (0x00400200 | ((x) << 16))
438#define CMD_SET_EP_STAT(x) (0x00400500 | ((x) << 16))
439#define CMD_CLR_BUF 0x00F20500
440#define DAT_CLR_BUF 0x00F20200
441#define CMD_VALID_BUF 0x00FA0500
442
443/* Device Address Register Definitions */
444#define DEV_ADDR_MASK 0x7F
445#define DEV_EN 0x80
446
447/* Device Configure Register Definitions */
448#define CONF_DVICE 0x01
449
450/* Device Mode Register Definitions */
451#define AP_CLK 0x01
452#define INAK_CI 0x02
453#define INAK_CO 0x04
454#define INAK_II 0x08
455#define INAK_IO 0x10
456#define INAK_BI 0x20
457#define INAK_BO 0x40
458
459/* Device Status Register Definitions */
460#define DEV_CON 0x01
461#define DEV_CON_CH 0x02
462#define DEV_SUS 0x04
463#define DEV_SUS_CH 0x08
464#define DEV_RST 0x10
465
466/* Error Code Register Definitions */
467#define ERR_EC_MASK 0x0F
468#define ERR_EA 0x10
469
470/* Error Status Register Definitions */
471#define ERR_PID 0x01
472#define ERR_UEPKT 0x02
473#define ERR_DCRC 0x04
474#define ERR_TIMOUT 0x08
475#define ERR_EOP 0x10
476#define ERR_B_OVRN 0x20
477#define ERR_BTSTF 0x40
478#define ERR_TGL 0x80
479
480/* Endpoint Select Register Definitions */
481#define EP_SEL_F 0x01
482#define EP_SEL_ST 0x02
483#define EP_SEL_STP 0x04
484#define EP_SEL_PO 0x08
485#define EP_SEL_EPN 0x10
486#define EP_SEL_B_1_FULL 0x20
487#define EP_SEL_B_2_FULL 0x40
488
489/* Endpoint Status Register Definitions */
490#define EP_STAT_ST 0x01
491#define EP_STAT_DA 0x20
492#define EP_STAT_RF_MO 0x40
493#define EP_STAT_CND_ST 0x80
494
495/* Clear Buffer Register Definitions */
496#define CLR_BUF_PO 0x01
497
498/* DMA Interrupt Bit Definitions */
499#define EOT_INT 0x01
500#define NDD_REQ_INT 0x02
501#define SYS_ERR_INT 0x04
502
503#define DRIVER_VERSION "1.03"
504static const char driver_name[] = "lpc32xx_udc";
505
506/*
507 *
508 * proc interface support
509 *
510 */
511#ifdef CONFIG_USB_GADGET_DEBUG_FILES
512static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
513static const char debug_filename[] = "driver/udc";
514
515static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
516{
517 struct lpc32xx_request *req;
518
519 seq_printf(s, "\n");
520 seq_printf(s, "%12s, maxpacket %4d %3s",
521 ep->ep.name, ep->ep.maxpacket,
522 ep->is_in ? "in" : "out");
523 seq_printf(s, " type %4s", epnames[ep->eptype]);
524 seq_printf(s, " ints: %12d", ep->totalints);
525
526 if (list_empty(&ep->queue))
527 seq_printf(s, "\t(queue empty)\n");
528 else {
529 list_for_each_entry(req, &ep->queue, queue) {
530 u32 length = req->req.actual;
531
532 seq_printf(s, "\treq %p len %d/%d buf %p\n",
533 &req->req, length,
534 req->req.length, req->req.buf);
535 }
536 }
537}
538
539static int proc_udc_show(struct seq_file *s, void *unused)
540{
541 struct lpc32xx_udc *udc = s->private;
542 struct lpc32xx_ep *ep;
543 unsigned long flags;
544
545 seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);
546
547 spin_lock_irqsave(&udc->lock, flags);
548
549 seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
550 udc->vbus ? "present" : "off",
551 udc->enabled ? (udc->vbus ? "active" : "enabled") :
552 "disabled",
553 udc->selfpowered ? "self" : "VBUS",
554 udc->suspended ? ", suspended" : "",
555 udc->driver ? udc->driver->driver.name : "(none)");
556
557 if (udc->enabled && udc->vbus) {
558 proc_ep_show(s, &udc->ep[0]);
559 list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
560 if (ep->desc)
561 proc_ep_show(s, ep);
562 }
563 }
564
565 spin_unlock_irqrestore(&udc->lock, flags);
566
567 return 0;
568}
569
570static int proc_udc_open(struct inode *inode, struct file *file)
571{
572 return single_open(file, proc_udc_show, PDE(inode)->data);
573}
574
575static const struct file_operations proc_ops = {
576 .owner = THIS_MODULE,
577 .open = proc_udc_open,
578 .read = seq_read,
579 .llseek = seq_lseek,
580 .release = single_release,
581};
582
583static void create_debug_file(struct lpc32xx_udc *udc)
584{
585 udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
586}
587
588static void remove_debug_file(struct lpc32xx_udc *udc)
589{
590 if (udc->pde)
591 debugfs_remove(udc->pde);
592}
593
594#else
595static inline void create_debug_file(struct lpc32xx_udc *udc) {}
596static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
597#endif
598
599/* Primary initialization sequence for the ISP1301 transceiver */
600static void isp1301_udc_configure(struct lpc32xx_udc *udc)
601{
602 /* LPC32XX only supports DAT_SE0 USB mode */
603 /* This sequence is important */
604
605 /* Disable transparent UART mode first */
606 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
607 (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
608 MC1_UART_EN);
609
610 /* Set full speed and SE0 mode */
611 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
612 (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
613 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
614 ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));
615
616 /*
617 * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
618 */
619 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
620 (ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
621 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
622 ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_SPD_SUSP_CTRL));
623
	/* Drive VBUS_DRV high or low depending on board setup */
625 if (udc->board->vbus_drv_pol != 0)
626 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
627 ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV);
628 else
629 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
630 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
631 OTG1_VBUS_DRV);
632
	/* Bi-directional mode with suspend control
	 * Enable both pulldowns for now - the pullup will be enabled when VBUS
	 * is detected */
636 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
637 (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
638 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
639 ISP1301_I2C_OTG_CONTROL_1,
640 (0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
641
642 /* Discharge VBUS (just in case) */
643 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
644 ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
645 msleep(1);
646 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
647 (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR),
648 OTG1_VBUS_DISCHRG);
649
650 /* Clear and enable VBUS high edge interrupt */
651 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
652 ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
653 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
654 ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
655 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
656 ISP1301_I2C_INTERRUPT_FALLING, INT_VBUS_VLD);
657 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
658 ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
659 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
660 ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);
661
662 /* Enable usb_need_clk clock after transceiver is initialized */
	writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);

665 dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
666 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
667 dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
668 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x02));
669 dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
670 i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
671}
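
/* Note on the ISP1301 accesses above: each ISP1301 register has a companion
 * "clear" address (reg | ISP1301_I2C_REG_CLEAR_ADDR). Writing a mask to the
 * base address sets those bits, while writing it to the clear address clears
 * them, which is why the configure sequence writes ~0 to the clear address
 * to wipe a register before setting only the bits it needs. */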
672
673/* Enables or disables the USB device pullup via the ISP1301 transceiver */
674static void isp1301_pullup_set(struct lpc32xx_udc *udc)
675{
676 if (udc->pullup)
677 /* Enable pullup for bus signalling */
678 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
679 ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
680 else
		/* Disable pullup for bus signalling */
682 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
683 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
684 OTG1_DP_PULLUP);
685}
686
687static void pullup_work(struct work_struct *work)
688{
689 struct lpc32xx_udc *udc =
690 container_of(work, struct lpc32xx_udc, pullup_job);
691
692 isp1301_pullup_set(udc);
693}
694
695static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
696 int block)
697{
698 if (en_pullup == udc->pullup)
699 return;
700
701 udc->pullup = en_pullup;
702 if (block)
703 isp1301_pullup_set(udc);
704 else
705 /* defer slow i2c pull up setting */
706 schedule_work(&udc->pullup_job);
707}
708
709#ifdef CONFIG_PM
710/* Powers up or down the ISP1301 transceiver */
711static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
712{
713 if (enable != 0)
		/* Power up ISP1301 - the ISP1301 will automatically wake up
		   when VBUS is detected */
716 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
717 ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
718 MC2_GLOBAL_PWR_DN);
719 else
720 /* Power down ISP1301 */
721 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
722 ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
723}
724
725static void power_work(struct work_struct *work)
726{
727 struct lpc32xx_udc *udc =
728 container_of(work, struct lpc32xx_udc, power_job);
729
730 isp1301_set_powerstate(udc, udc->poweron);
731}
732#endif
733
734/*
735 *
736 * USB protocol engine command/data read/write helper functions
737 *
738 */
739/* Issues a single command to the USB device state machine */
740static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
741{
742 u32 pass = 0;
743 int to;
744
745 /* EP may lock on CLRI if this read isn't done */
746 u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
747 (void) tmp;
748
749 while (pass == 0) {
750 writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));
751
752 /* Write command code */
753 writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
754 to = 10000;
755 while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
756 USBD_CCEMPTY) == 0) && (to > 0)) {
757 to--;
758 }
759
760 if (to > 0)
761 pass = 1;
762
763 cpu_relax();
764 }
765}
766
767/* Issues 2 commands (or command and data) to the USB device state machine */
768static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
769 u32 data)
770{
771 udc_protocol_cmd_w(udc, cmd);
772 udc_protocol_cmd_w(udc, data);
773}
774
775/* Issues a single command to the USB device state machine and reads
776 * response data */
777static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
778{
779 u32 tmp;
780 int to = 1000;
781
782 /* Write a command and read data from the protocol engine */
783 writel((USBD_CDFULL | USBD_CCEMPTY),
784 USBD_DEVINTCLR(udc->udp_baseaddr));
785
786 /* Write command code */
787 udc_protocol_cmd_w(udc, cmd);
788
789 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
790 while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
791 && (to > 0))
792 to--;
793 if (!to)
794 dev_dbg(udc->dev,
795 "Protocol engine didn't receive response (CDFULL)\n");
796
797 return readl(USBD_CMDDATA(udc->udp_baseaddr));
798}
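
/* Typical usage of the helpers above (the same pattern is used by
 * udc_get_current_frame() further down): issue the command phase first, then
 * read the data phase, e.g. to fetch the device status byte:
 *
 *	udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
 *	devstat = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
 */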
799
800/*
801 *
802 * USB device interrupt mask support functions
803 *
804 */
805/* Enable one or more USB device interrupts */
806static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
807{
808 udc->enabled_devints |= devmask;
809 writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
810}
811
812/* Disable one or more USB device interrupts */
813static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
814{
815 udc->enabled_devints &= ~mask;
816 writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
817}
818
819/* Clear one or more USB device interrupts */
820static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
821{
822 writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
823}
824
825/*
826 *
827 * Endpoint interrupt disable/enable functions
828 *
829 */
830/* Enable one or more USB endpoint interrupts */
831static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
832{
833 udc->enabled_hwepints |= (1 << hwep);
834 writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
835}
836
837/* Disable one or more USB endpoint interrupts */
838static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
839{
840 udc->enabled_hwepints &= ~(1 << hwep);
841 writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
842}
843
844/* Clear one or more USB endpoint interrupts */
845static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
846{
847 writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
848}
849
850/* Enable DMA for the HW channel */
851static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
852{
853 writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
854}
855
856/* Disable DMA for the HW channel */
857static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
858{
859 writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
860}
861
862/*
863 *
864 * Endpoint realize/unrealize functions
865 *
866 */
867/* Before an endpoint can be used, it needs to be realized
868 * in the USB protocol engine - this realizes the endpoint.
869 * The interrupt (FIFO or DMA) is not enabled with this function */
870static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
871 u32 maxpacket)
872{
873 int to = 1000;
874
875 writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
876 writel(hwep, USBD_EPIND(udc->udp_baseaddr));
877 udc->realized_eps |= (1 << hwep);
878 writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
879 writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));
880
881 /* Wait until endpoint is realized in hardware */
882 while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
883 USBD_EP_RLZED)) && (to > 0))
884 to--;
885 if (!to)
886 dev_dbg(udc->dev, "EP not correctly realized in hardware\n");
887
888 writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
889}
890
891/* Unrealize an EP */
892static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
893{
894 udc->realized_eps &= ~(1 << hwep);
895 writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
896}
897
898/*
899 *
900 * Endpoint support functions
901 *
902 */
903/* Select and clear endpoint interrupt */
904static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
905{
906 udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
907 return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
908}
909
910/* Disables the endpoint in the USB protocol engine */
911static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
912{
913 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
914 DAT_WR_BYTE(EP_STAT_DA));
915}
916
917/* Stalls the endpoint - endpoint will return STALL */
918static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
919{
920 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
921 DAT_WR_BYTE(EP_STAT_ST));
922}
923
924/* Clear stall or reset endpoint */
925static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
926{
927 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
928 DAT_WR_BYTE(0));
929}
930
931/* Select an endpoint for endpoint status, clear, validate */
932static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
933{
934 udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
935}
936
937/*
938 *
939 * Endpoint buffer management functions
940 *
941 */
942/* Clear the current endpoint's buffer */
943static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
944{
945 udc_select_hwep(udc, hwep);
946 udc_protocol_cmd_w(udc, CMD_CLR_BUF);
947}
948
949/* Validate the current endpoint's buffer */
950static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
951{
952 udc_select_hwep(udc, hwep);
953 udc_protocol_cmd_w(udc, CMD_VALID_BUF);
954}
955
956static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
957{
958 /* Clear EP interrupt */
959 uda_clear_hwepint(udc, hwep);
960 return udc_selep_clrint(udc, hwep);
961}
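
/* How the helpers above combine for non-DMA (FIFO) transfers: an OUT packet
 * is consumed by selecting the endpoint, reading the FIFO and then clearing
 * the buffer (udc_clr_buffer_hwep), while an IN packet is sent by writing the
 * FIFO and then validating the buffer (udc_val_buffer_hwep) so the hardware
 * transmits it on the next IN token - see udc_read_hwep() and
 * udc_write_hwep() below. */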
962
963/*
964 *
965 * USB EP DMA support
966 *
967 */
968/* Allocate a DMA Descriptor */
969static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
970{
971 dma_addr_t dma;
972 struct lpc32xx_usbd_dd_gad *dd;
973
974 dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
975 udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
976 if (dd)
977 dd->this_dma = dma;
978
979 return dd;
980}
981
982/* Free a DMA Descriptor */
983static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
984{
985 dma_pool_free(udc->dd_cache, dd, dd->this_dma);
986}
987
988/*
989 *
990 * USB setup and shutdown functions
991 *
992 */
993/* Enables or disables most of the USB system clocks when low power mode is
994 * needed. Clocks are typically started on a connection event, and disabled
995 * when a cable is disconnected */
static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
{
	if (enable != 0) {
999 if (udc->clocked)
1000 return;
1001
1002 udc->clocked = 1;
1003
1004 /* 48MHz PLL up */
1005 clk_enable(udc->usb_pll_clk);
1006
1007 /* Enable the USB device clock */
1008 writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
1009 USB_CTRL);
1010
		clk_enable(udc->usb_otg_clk);
	} else {
1013 if (!udc->clocked)
1014 return;
1015
1016 udc->clocked = 0;
1017
1018 /* Never disable the USB_HCLK during normal operation */
1019
		/* 48MHz PLL down */
		clk_disable(udc->usb_pll_clk);

		/* Disable the USB device clock */
		writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
		       USB_CTRL);

		clk_disable(udc->usb_otg_clk);
	}
1029}
1030
1031/* Set/reset USB device address */
1032static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
1033{
1034 /* Address will be latched at the end of the status phase, or
1035 latched immediately if function is called twice */
1036 udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
1037 DAT_WR_BYTE(DEV_EN | addr));
1038}
1039
/* Set up an IN request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
1043static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
1044{
1045 struct lpc32xx_request *req;
1046 u32 hwep = ep->hwep_num;
1047
1048 ep->req_pending = 1;
1049
1050 /* There will always be a request waiting here */
1051 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1052
1053 /* Place the DD Descriptor into the UDCA */
1054 udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
1055
1056 /* Enable DMA and interrupt for the HW EP */
1057 udc_ep_dma_enable(udc, hwep);
1058
1059 /* Clear ZLP if last packet is not of MAXP size */
1060 if (req->req.length % ep->ep.maxpacket)
1061 req->send_zlp = 0;
1062
1063 return 0;
1064}
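
/* Note: writing the DD physical address into udc->udca_v_base[hwep] is what
 * actually queues the transfer - once DMA is enabled for the endpoint, the
 * controller fetches the descriptor from that UDCA slot on the next IN token.
 * send_zlp is cleared here because a final short packet already terminates
 * the transfer, so no explicit zero-length packet is needed. */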
1065
/* Set up an OUT request for DMA transfer - this consists of determining the
 * list of DMA addresses for the transfer, allocating DMA Descriptors,
 * installing the DD into the UDCA, and then enabling the DMA for that EP */
1069static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
1070{
1071 struct lpc32xx_request *req;
1072 u32 hwep = ep->hwep_num;
1073
1074 ep->req_pending = 1;
1075
1076 /* There will always be a request waiting here */
1077 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1078
1079 /* Place the DD Descriptor into the UDCA */
1080 udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
1081
1082 /* Enable DMA and interrupt for the HW EP */
1083 udc_ep_dma_enable(udc, hwep);
1084 return 0;
1085}
1086
1087static void udc_disable(struct lpc32xx_udc *udc)
1088{
1089 u32 i;
1090
1091 /* Disable device */
1092 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
1093 udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));
1094
1095 /* Disable all device interrupts (including EP0) */
1096 uda_disable_devint(udc, 0x3FF);
1097
1098 /* Disable and reset all endpoint interrupts */
1099 for (i = 0; i < 32; i++) {
1100 uda_disable_hwepint(udc, i);
1101 uda_clear_hwepint(udc, i);
1102 udc_disable_hwep(udc, i);
1103 udc_unrealize_hwep(udc, i);
1104 udc->udca_v_base[i] = 0;
1105
1106 /* Disable and clear all interrupts and DMA */
1107 udc_ep_dma_disable(udc, i);
1108 writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
1109 writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
1110 writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1111 writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
1112 }
1113
1114 /* Disable DMA interrupts */
1115 writel(0, USBD_DMAINTEN(udc->udp_baseaddr));
1116
1117 writel(0, USBD_UDCAH(udc->udp_baseaddr));
1118}
1119
1120static void udc_enable(struct lpc32xx_udc *udc)
1121{
1122 u32 i;
1123 struct lpc32xx_ep *ep = &udc->ep[0];
1124
1125 /* Start with known state */
1126 udc_disable(udc);
1127
1128 /* Enable device */
1129 udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));
1130
1131 /* EP interrupts on high priority, FRAME interrupt on low priority */
1132 writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
1133 writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));
1134
1135 /* Clear any pending device interrupts */
1136 writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));
1137
1138 /* Setup UDCA - not yet used (DMA) */
1139 writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));
1140
1141 /* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
1142 for (i = 0; i <= 1; i++) {
1143 udc_realize_hwep(udc, i, ep->ep.maxpacket);
1144 uda_enable_hwepint(udc, i);
1145 udc_select_hwep(udc, i);
1146 udc_clrstall_hwep(udc, i);
1147 udc_clr_buffer_hwep(udc, i);
1148 }
1149
1150 /* Device interrupt setup */
1151 uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
1152 USBD_EP_FAST));
1153 uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
1154 USBD_EP_FAST));
1155
1156 /* Set device address to 0 - called twice to force a latch in the USB
1157 engine without the need of a setup packet status closure */
1158 udc_set_address(udc, 0);
1159 udc_set_address(udc, 0);
1160
1161 /* Enable master DMA interrupts */
1162 writel((USBD_SYS_ERR_INT | USBD_EOT_INT),
1163 USBD_DMAINTEN(udc->udp_baseaddr));
1164
1165 udc->dev_status = 0;
1166}
1167
1168/*
1169 *
1170 * USB device board specific events handled via callbacks
1171 *
1172 */
1173/* Connection change event - notify board function of change */
1174static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
1175{
1176 /* Just notify of a connection change event (optional) */
1177 if (udc->board->conn_chgb != NULL)
1178 udc->board->conn_chgb(conn);
1179}
1180
1181/* Suspend/resume event - notify board function of change */
1182static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
1183{
1184 /* Just notify of a Suspend/resume change event (optional) */
1185 if (udc->board->susp_chgb != NULL)
1186 udc->board->susp_chgb(conn);
1187
1188 if (conn)
1189 udc->suspended = 0;
1190 else
1191 udc->suspended = 1;
1192}
1193
1194/* Remote wakeup enable/disable - notify board function of change */
1195static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
1196{
1197 if (udc->board->rmwk_chgb != NULL)
1198 udc->board->rmwk_chgb(udc->dev_status &
1199 (1 << USB_DEVICE_REMOTE_WAKEUP));
1200}
1201
1202/* Reads data from FIFO, adjusts for alignment and data size */
1203static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
1204{
1205 int n, i, bl;
1206 u16 *p16;
1207 u32 *p32, tmp, cbytes;
1208
1209 /* Use optimal data transfer method based on source address and size */
1210 switch (((u32) data) & 0x3) {
1211 case 0: /* 32-bit aligned */
1212 p32 = (u32 *) data;
1213 cbytes = (bytes & ~0x3);
1214
1215 /* Copy 32-bit aligned data first */
1216 for (n = 0; n < cbytes; n += 4)
1217 *p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));
1218
1219 /* Handle any remaining bytes */
1220 bl = bytes - cbytes;
1221 if (bl) {
1222 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1223 for (n = 0; n < bl; n++)
1224 data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
1225
1226 }
1227 break;
1228
1229 case 1: /* 8-bit aligned */
1230 case 3:
1231 /* Each byte has to be handled independently */
1232 for (n = 0; n < bytes; n += 4) {
1233 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1234
1235 bl = bytes - n;
			if (bl > 4)
				bl = 4;

			for (i = 0; i < bl; i++)
				data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
1241 }
1242 break;
1243
1244 case 2: /* 16-bit aligned */
1245 p16 = (u16 *) data;
1246 cbytes = (bytes & ~0x3);
1247
1248 /* Copy 32-bit sized objects first with 16-bit alignment */
1249 for (n = 0; n < cbytes; n += 4) {
1250 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1251 *p16++ = (u16)(tmp & 0xFFFF);
1252 *p16++ = (u16)((tmp >> 16) & 0xFFFF);
1253 }
1254
1255 /* Handle any remaining bytes */
1256 bl = bytes - cbytes;
1257 if (bl) {
1258 tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1259 for (n = 0; n < bl; n++)
1260 data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
1261 }
1262 break;
1263 }
1264}
1265
1266/* Read data from the FIFO for an endpoint. This function is for endpoints (such
1267 * as EP0) that don't use DMA. This function should only be called if a packet
1268 * is known to be ready to read for the endpoint. Note that the endpoint must
1269 * be selected in the protocol engine prior to this call. */
1270static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
1271 u32 bytes)
1272{
1273 u32 tmpv;
1274 int to = 1000;
1275 u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;
1276
1277 /* Setup read of endpoint */
1278 writel(hwrep, USBD_CTRL(udc->udp_baseaddr));
1279
1280 /* Wait until packet is ready */
1281 while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
1282 PKT_RDY) == 0) && (to > 0))
1283 to--;
1284 if (!to)
1285 dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");
1286
1287 /* Mask out count */
1288 tmp = tmpv & PKT_LNGTH_MASK;
1289 if (bytes < tmp)
1290 tmp = bytes;
1291
1292 if ((tmp > 0) && (data != NULL))
1293 udc_pop_fifo(udc, (u8 *) data, tmp);
1294
1295 writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
1296
1297 /* Clear the buffer */
1298 udc_clr_buffer_hwep(udc, hwep);
1299
1300 return tmp;
1301}
1302
1303/* Stuffs data into the FIFO, adjusts for alignment and data size */
1304static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
1305{
1306 int n, i, bl;
1307 u16 *p16;
1308 u32 *p32, tmp, cbytes;
1309
1310 /* Use optimal data transfer method based on source address and size */
1311 switch (((u32) data) & 0x3) {
1312 case 0: /* 32-bit aligned */
1313 p32 = (u32 *) data;
1314 cbytes = (bytes & ~0x3);
1315
1316 /* Copy 32-bit aligned data first */
1317 for (n = 0; n < cbytes; n += 4)
1318 writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
1319
1320 /* Handle any remaining bytes */
1321 bl = bytes - cbytes;
1322 if (bl) {
1323 tmp = 0;
1324 for (n = 0; n < bl; n++)
1325 tmp |= data[cbytes + n] << (n * 8);
1326
1327 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1328 }
1329 break;
1330
1331 case 1: /* 8-bit aligned */
1332 case 3:
1333 /* Each byte has to be handled independently */
1334 for (n = 0; n < bytes; n += 4) {
1335 bl = bytes - n;
1336 if (bl > 4)
1337 bl = 4;
1338
1339 tmp = 0;
1340 for (i = 0; i < bl; i++)
1341 tmp |= data[n + i] << (i * 8);
1342
1343 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1344 }
1345 break;
1346
1347 case 2: /* 16-bit aligned */
1348 p16 = (u16 *) data;
1349 cbytes = (bytes & ~0x3);
1350
1351 /* Copy 32-bit aligned data first */
1352 for (n = 0; n < cbytes; n += 4) {
1353 tmp = *p16++ & 0xFFFF;
1354 tmp |= (*p16++ & 0xFFFF) << 16;
1355 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1356 }
1357
1358 /* Handle any remaining bytes */
1359 bl = bytes - cbytes;
1360 if (bl) {
1361 tmp = 0;
1362 for (n = 0; n < bl; n++)
1363 tmp |= data[cbytes + n] << (n * 8);
1364
1365 writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1366 }
1367 break;
1368 }
1369}
1370
1371/* Write data to the FIFO for an endpoint. This function is for endpoints (such
1372 * as EP0) that don't use DMA. Note that the endpoint must be selected in the
1373 * protocol engine prior to this call. */
1374static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
1375 u32 bytes)
1376{
1377 u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
1378
1379 if ((bytes > 0) && (data == NULL))
1380 return;
1381
1382 /* Setup write of endpoint */
1383 writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
1384
1385 writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
1386
1387 /* Need at least 1 byte to trigger TX */
1388 if (bytes == 0)
1389 writel(0, USBD_TXDATA(udc->udp_baseaddr));
1390 else
1391 udc_stuff_fifo(udc, (u8 *) data, bytes);
1392
1393 writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
1394
1395 udc_val_buffer_hwep(udc, hwep);
1396}
1397
1398/* USB device reset - resets USB to a default state with just EP0
1399 enabled */
1400static void uda_usb_reset(struct lpc32xx_udc *udc)
1401{
1402 u32 i = 0;
1403 /* Re-init device controller and EP0 */
1404 udc_enable(udc);
1405 udc->gadget.speed = USB_SPEED_FULL;
1406
1407 for (i = 1; i < NUM_ENDPOINTS; i++) {
1408 struct lpc32xx_ep *ep = &udc->ep[i];
1409 ep->req_pending = 0;
1410 }
1411}
1412
1413/* Send a ZLP on EP0 */
1414static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
1415{
1416 udc_write_hwep(udc, EP_IN, NULL, 0);
1417}
1418
1419/* Get current frame number */
1420static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
1421{
1422 u16 flo, fhi;
1423
1424 udc_protocol_cmd_w(udc, CMD_RD_FRAME);
1425 flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
1426 fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
1427
1428 return (fhi << 8) | flo;
1429}
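
/* The frame number is returned by the protocol engine as two byte-wide data
 * phases, low byte first, so the 11-bit USB frame number is reassembled as
 * (fhi << 8) | flo above. */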
1430
1431/* Set the device as configured - enables all endpoints */
1432static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
1433{
1434 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
1435}
1436
1437/* Set the device as unconfigured - disables all endpoints */
1438static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
1439{
1440 udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
1441}
1442
1443/* reinit == restore initial software state */
1444static void udc_reinit(struct lpc32xx_udc *udc)
1445{
1446 u32 i;
1447
1448 INIT_LIST_HEAD(&udc->gadget.ep_list);
1449 INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
1450
1451 for (i = 0; i < NUM_ENDPOINTS; i++) {
1452 struct lpc32xx_ep *ep = &udc->ep[i];
1453
1454 if (i != 0)
1455 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1456 ep->desc = NULL;
1457 ep->ep.maxpacket = ep->maxpacket;
1458 INIT_LIST_HEAD(&ep->queue);
1459 ep->req_pending = 0;
1460 }
1461
1462 udc->ep0state = WAIT_FOR_SETUP;
1463}
1464
1465/* Must be called with lock */
1466static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
1467{
1468 struct lpc32xx_udc *udc = ep->udc;
1469
1470 list_del_init(&req->queue);
1471 if (req->req.status == -EINPROGRESS)
1472 req->req.status = status;
1473 else
1474 status = req->req.status;
1475
1476 if (ep->lep) {
1477 enum dma_data_direction direction;
1478
1479 if (ep->is_in)
1480 direction = DMA_TO_DEVICE;
1481 else
1482 direction = DMA_FROM_DEVICE;
1483
1484 if (req->mapped) {
1485 dma_unmap_single(ep->udc->gadget.dev.parent,
1486 req->req.dma, req->req.length,
1487 direction);
1488 req->req.dma = 0;
1489 req->mapped = 0;
1490 } else
1491 dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
1492 req->req.dma, req->req.length,
1493 direction);
1494
1495 /* Free DDs */
1496 udc_dd_free(udc, req->dd_desc_ptr);
1497 }
1498
1499 if (status && status != -ESHUTDOWN)
1500 ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
1501
1502 ep->req_pending = 0;
1503 spin_unlock(&udc->lock);
1504 req->req.complete(&ep->ep, &req->req);
1505 spin_lock(&udc->lock);
1506}
1507
1508/* Must be called with lock */
1509static void nuke(struct lpc32xx_ep *ep, int status)
1510{
1511 struct lpc32xx_request *req;
1512
1513 while (!list_empty(&ep->queue)) {
1514 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1515 done(ep, req, status);
1516 }
1517
1518 if (ep->desc && status == -ESHUTDOWN) {
1519 uda_disable_hwepint(ep->udc, ep->hwep_num);
1520 udc_disable_hwep(ep->udc, ep->hwep_num);
1521 }
1522}
1523
1524/* IN endpoint 0 transfer */
1525static int udc_ep0_in_req(struct lpc32xx_udc *udc)
1526{
1527 struct lpc32xx_request *req;
1528 struct lpc32xx_ep *ep0 = &udc->ep[0];
1529 u32 tsend, ts = 0;
1530
1531 if (list_empty(&ep0->queue))
1532 /* Nothing to send */
1533 return 0;
1534 else
1535 req = list_entry(ep0->queue.next, struct lpc32xx_request,
1536 queue);
1537
1538 tsend = ts = req->req.length - req->req.actual;
1539 if (ts == 0) {
1540 /* Send a ZLP */
1541 udc_ep0_send_zlp(udc);
1542 done(ep0, req, 0);
1543 return 1;
1544 } else if (ts > ep0->ep.maxpacket)
1545 ts = ep0->ep.maxpacket; /* Just send what we can */
1546
1547 /* Write data to the EP0 FIFO and start transfer */
1548 udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
1549
1550 /* Increment data pointer */
1551 req->req.actual += ts;
1552
1553 if (tsend >= ep0->ep.maxpacket)
1554 return 0; /* Stay in data transfer state */
1555
1556 /* Transfer request is complete */
1557 udc->ep0state = WAIT_FOR_SETUP;
1558 done(ep0, req, 0);
1559 return 1;
1560}
1561
1562/* OUT endpoint 0 transfer */
1563static int udc_ep0_out_req(struct lpc32xx_udc *udc)
1564{
1565 struct lpc32xx_request *req;
1566 struct lpc32xx_ep *ep0 = &udc->ep[0];
1567 u32 tr, bufferspace;
1568
1569 if (list_empty(&ep0->queue))
1570 return 0;
1571 else
1572 req = list_entry(ep0->queue.next, struct lpc32xx_request,
1573 queue);
1574
1575 if (req) {
1576 if (req->req.length == 0) {
1577 /* Just dequeue request */
1578 done(ep0, req, 0);
1579 udc->ep0state = WAIT_FOR_SETUP;
1580 return 1;
1581 }
1582
1583 /* Get data from FIFO */
1584 bufferspace = req->req.length - req->req.actual;
1585 if (bufferspace > ep0->ep.maxpacket)
1586 bufferspace = ep0->ep.maxpacket;
1587
1588 /* Copy data to buffer */
1589 prefetchw(req->req.buf + req->req.actual);
1590 tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
1591 bufferspace);
1592 req->req.actual += bufferspace;
1593
1594 if (tr < ep0->ep.maxpacket) {
1595 /* This is the last packet */
1596 done(ep0, req, 0);
1597 udc->ep0state = WAIT_FOR_SETUP;
1598 return 1;
1599 }
1600 }
1601
1602 return 0;
1603}
1604
1605/* Must be called with lock */
1606static void stop_activity(struct lpc32xx_udc *udc)
1607{
1608 struct usb_gadget_driver *driver = udc->driver;
1609 int i;
1610
1611 if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1612 driver = NULL;
1613
1614 udc->gadget.speed = USB_SPEED_UNKNOWN;
1615 udc->suspended = 0;
1616
1617 for (i = 0; i < NUM_ENDPOINTS; i++) {
1618 struct lpc32xx_ep *ep = &udc->ep[i];
1619 nuke(ep, -ESHUTDOWN);
1620 }
1621 if (driver) {
1622 spin_unlock(&udc->lock);
1623 driver->disconnect(&udc->gadget);
1624 spin_lock(&udc->lock);
1625 }
1626
1627 isp1301_pullup_enable(udc, 0, 0);
1628 udc_disable(udc);
1629 udc_reinit(udc);
1630}
1631
1632/*
1633 * Activate or kill host pullup
1634 * Can be called with or without lock
1635 */
1636static void pullup(struct lpc32xx_udc *udc, int is_on)
1637{
1638 if (!udc->clocked)
1639 return;
1640
1641 if (!udc->enabled || !udc->vbus)
1642 is_on = 0;
1643
1644 if (is_on != udc->pullup)
1645 isp1301_pullup_enable(udc, is_on, 0);
1646}
1647
1648/* Must be called without lock */
1649static int lpc32xx_ep_disable(struct usb_ep *_ep)
1650{
1651 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1652 struct lpc32xx_udc *udc = ep->udc;
1653 unsigned long flags;
1654
1655 if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
1656 return -EINVAL;
1657 spin_lock_irqsave(&udc->lock, flags);
1658
1659 nuke(ep, -ESHUTDOWN);
1660
1661 /* restore the endpoint's pristine config */
1662 ep->desc = NULL;
1663
1664 /* Clear all DMA statuses for this EP */
1665 udc_ep_dma_disable(udc, ep->hwep_num);
1666 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
1667 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
1668 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1669 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
1670
1671 /* Remove the DD pointer in the UDCA */
1672 udc->udca_v_base[ep->hwep_num] = 0;
1673
1674 /* Disable and reset endpoint and interrupt */
1675 uda_clear_hwepint(udc, ep->hwep_num);
1676 udc_unrealize_hwep(udc, ep->hwep_num);
1677
1678 ep->hwep_num = 0;
1679
1680 spin_unlock_irqrestore(&udc->lock, flags);
1681
1682 atomic_dec(&udc->enabled_ep_cnt);
1683 wake_up(&udc->ep_disable_wait_queue);
1684
1685 return 0;
1686}
1687
1688/* Must be called without lock */
1689static int lpc32xx_ep_enable(struct usb_ep *_ep,
1690 const struct usb_endpoint_descriptor *desc)
1691{
1692 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1693 struct lpc32xx_udc *udc = ep->udc;
1694 u16 maxpacket;
1695 u32 tmp;
1696 unsigned long flags;
1697
1698 /* Verify EP data */
1699 if ((!_ep) || (!ep) || (!desc) || (ep->desc) ||
1700 (desc->bDescriptorType != USB_DT_ENDPOINT)) {
1701 dev_dbg(udc->dev, "bad ep or descriptor\n");
1702 return -EINVAL;
1703 }
1704 maxpacket = usb_endpoint_maxp(desc);
1705 if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
1706 dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
1707 return -EINVAL;
1708 }
1709
1710 /* Don't touch EP0 */
1711 if (ep->hwep_num_base == 0) {
1712 dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
1713 return -EINVAL;
1714 }
1715
1716 /* Is driver ready? */
1717 if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
1718 dev_dbg(udc->dev, "bogus device state\n");
1719 return -ESHUTDOWN;
1720 }
1721
1722 tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
1723 switch (tmp) {
1724 case USB_ENDPOINT_XFER_CONTROL:
1725 return -EINVAL;
1726
1727 case USB_ENDPOINT_XFER_INT:
1728 if (maxpacket > ep->maxpacket) {
1729 dev_dbg(udc->dev,
1730 "Bad INT endpoint maxpacket %d\n", maxpacket);
1731 return -EINVAL;
1732 }
1733 break;
1734
1735 case USB_ENDPOINT_XFER_BULK:
1736 switch (maxpacket) {
1737 case 8:
1738 case 16:
1739 case 32:
1740 case 64:
1741 break;
1742
1743 default:
1744 dev_dbg(udc->dev,
1745 "Bad BULK endpoint maxpacket %d\n", maxpacket);
1746 return -EINVAL;
1747 }
1748 break;
1749
1750 case USB_ENDPOINT_XFER_ISOC:
1751 break;
1752 }
1753 spin_lock_irqsave(&udc->lock, flags);
1754
1755 /* Initialize endpoint to match the selected descriptor */
1756 ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
1757 ep->desc = desc;
1758 ep->ep.maxpacket = maxpacket;
1759
1760 /* Map hardware endpoint from base and direction */
1761 if (ep->is_in)
1762 /* IN endpoints are offset 1 from the OUT endpoint */
1763 ep->hwep_num = ep->hwep_num_base + EP_IN;
1764 else
1765 ep->hwep_num = ep->hwep_num_base;
1766
1767 ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
1768 ep->hwep_num, maxpacket, (ep->is_in == 1));
1769
1770 /* Realize the endpoint, interrupt is enabled later when
1771 * buffers are queued, IN EPs will NAK until buffers are ready */
1772 udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
1773 udc_clr_buffer_hwep(udc, ep->hwep_num);
1774 uda_disable_hwepint(udc, ep->hwep_num);
1775 udc_clrstall_hwep(udc, ep->hwep_num);
1776
1777 /* Clear all DMA statuses for this EP */
1778 udc_ep_dma_disable(udc, ep->hwep_num);
1779 writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
1780 writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
1781 writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1782 writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
1783
1784 spin_unlock_irqrestore(&udc->lock, flags);
1785
1786 atomic_inc(&udc->enabled_ep_cnt);
1787 return 0;
1788}
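
/* For reference: gadget function drivers reach the endpoint operations above
 * through the generic gadget API (usb_ep_enable(), usb_ep_alloc_request(),
 * usb_ep_queue(), ...), which dispatches through the usb_ep_ops table
 * registered for each endpoint. Queuing a bulk IN request therefore ends up
 * in lpc32xx_ep_queue() and, from there, udc_ep_in_req_dma(). */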
1789
1790/*
1791 * Allocate a USB request list
1792 * Can be called with or without lock
1793 */
1794static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
1795 gfp_t gfp_flags)
1796{
1797 struct lpc32xx_request *req;
1798
1799 req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
1800 if (!req)
1801 return NULL;
1802
1803 INIT_LIST_HEAD(&req->queue);
1804 return &req->req;
1805}
1806
1807/*
1808 * De-allocate a USB request list
1809 * Can be called with or without lock
1810 */
1811static void lpc32xx_ep_free_request(struct usb_ep *_ep,
1812 struct usb_request *_req)
1813{
1814 struct lpc32xx_request *req;
1815
1816 req = container_of(_req, struct lpc32xx_request, req);
1817 BUG_ON(!list_empty(&req->queue));
1818 kfree(req);
1819}
1820
1821/* Must be called without lock */
1822static int lpc32xx_ep_queue(struct usb_ep *_ep,
1823 struct usb_request *_req, gfp_t gfp_flags)
1824{
1825 struct lpc32xx_request *req;
1826 struct lpc32xx_ep *ep;
1827 struct lpc32xx_udc *udc;
1828 unsigned long flags;
1829 int status = 0;
1830
1831 req = container_of(_req, struct lpc32xx_request, req);
1832 ep = container_of(_ep, struct lpc32xx_ep, ep);
1833
1834 if (!_req || !_req->complete || !_req->buf ||
1835 !list_empty(&req->queue))
1836 return -EINVAL;
1837
1838 udc = ep->udc;
1839
1840 if (!_ep || (!ep->desc && ep->hwep_num_base != 0)) {
1841 dev_dbg(udc->dev, "invalid ep\n");
1842 return -EINVAL;
1843 }
1844
1845
1846 if ((!udc) || (!udc->driver) ||
1847 (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
1848 dev_dbg(udc->dev, "invalid device\n");
1849 return -EINVAL;
1850 }
1851
1852 if (ep->lep) {
1853 enum dma_data_direction direction;
1854 struct lpc32xx_usbd_dd_gad *dd;
1855
1856 /* Map DMA pointer */
1857 if (ep->is_in)
1858 direction = DMA_TO_DEVICE;
1859 else
1860 direction = DMA_FROM_DEVICE;
1861
1862 if (req->req.dma == 0) {
1863 req->req.dma = dma_map_single(
1864 ep->udc->gadget.dev.parent,
1865 req->req.buf, req->req.length, direction);
1866 req->mapped = 1;
1867 } else {
1868 dma_sync_single_for_device(
1869 ep->udc->gadget.dev.parent, req->req.dma,
1870 req->req.length, direction);
1871 req->mapped = 0;
1872 }
1873
1874 /* For the request, build a list of DDs */
1875 dd = udc_dd_alloc(udc);
1876 if (!dd) {
1877 /* Error allocating DD */
1878 return -ENOMEM;
1879 }
1880 req->dd_desc_ptr = dd;
1881
1882 /* Setup the DMA descriptor */
1883 dd->dd_next_phy = dd->dd_next_v = 0;
1884 dd->dd_buffer_addr = req->req.dma;
1885 dd->dd_status = 0;
1886
1887 /* Special handling for ISO EPs */
1888 if (ep->eptype == EP_ISO_TYPE) {
1889 dd->dd_setup = DD_SETUP_ISO_EP |
1890 DD_SETUP_PACKETLEN(0) |
1891 DD_SETUP_DMALENBYTES(1);
1892 dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
1893 if (ep->is_in)
1894 dd->iso_status[0] = req->req.length;
1895 else
1896 dd->iso_status[0] = 0;
1897 } else
1898 dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
1899 DD_SETUP_DMALENBYTES(req->req.length);
1900 }
1901
1902 ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
1903 _req, _req->length, _req->buf, ep->is_in, _req->zero);
1904
1905 spin_lock_irqsave(&udc->lock, flags);
1906
1907 _req->status = -EINPROGRESS;
1908 _req->actual = 0;
1909 req->send_zlp = _req->zero;
1910
1911 /* Kickstart empty queues */
1912 if (list_empty(&ep->queue)) {
1913 list_add_tail(&req->queue, &ep->queue);
1914
1915 if (ep->hwep_num_base == 0) {
1916 /* Handle expected data direction */
1917 if (ep->is_in) {
1918 /* IN packet to host */
1919 udc->ep0state = DATA_IN;
1920 status = udc_ep0_in_req(udc);
1921 } else {
1922 /* OUT packet from host */
1923 udc->ep0state = DATA_OUT;
1924 status = udc_ep0_out_req(udc);
1925 }
1926 } else if (ep->is_in) {
1927 /* IN packet to host and kick off transfer */
1928 if (!ep->req_pending)
1929 udc_ep_in_req_dma(udc, ep);
1930 } else
1931 /* OUT packet from host and kick off list */
1932 if (!ep->req_pending)
1933 udc_ep_out_req_dma(udc, ep);
1934 } else
1935 list_add_tail(&req->queue, &ep->queue);
1936
1937 spin_unlock_irqrestore(&udc->lock, flags);
1938
1939 return (status < 0) ? status : 0;
1940}
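
/*
 * Usage sketch (illustrative only): a function driver submits transfers
 * through the standard gadget request API, which calls back into the ops
 * above. "buf", "len" and "my_complete" are hypothetical names.
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);  // lpc32xx_ep_alloc_request()
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);     // lpc32xx_ep_queue()
 *
 * lpc32xx_ep_queue() maps the buffer for DMA (unless req->dma is already
 * set), builds a DMA descriptor and, when the endpoint queue was empty,
 * starts the transfer immediately; otherwise the request just joins the
 * list and is started from the completion path.
 */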
1941
1942/* Must be called without lock */
1943static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1944{
1945 struct lpc32xx_ep *ep;
1946 struct lpc32xx_request *req;
1947 unsigned long flags;
1948
1949 ep = container_of(_ep, struct lpc32xx_ep, ep);
1950 if (!_ep || ep->hwep_num_base == 0)
1951 return -EINVAL;
1952
1953 spin_lock_irqsave(&ep->udc->lock, flags);
1954
1955 /* make sure it's actually queued on this endpoint */
1956 list_for_each_entry(req, &ep->queue, queue) {
1957 if (&req->req == _req)
1958 break;
1959 }
1960 if (&req->req != _req) {
1961 spin_unlock_irqrestore(&ep->udc->lock, flags);
1962 return -EINVAL;
1963 }
1964
1965 done(ep, req, -ECONNRESET);
1966
1967 spin_unlock_irqrestore(&ep->udc->lock, flags);
1968
1969 return 0;
1970}
1971
1972/* Must be called without lock */
1973static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
1974{
1975 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1976 struct lpc32xx_udc *udc = ep->udc;
1977 unsigned long flags;
1978
1979 if ((!ep) || (ep->desc == NULL) || (ep->hwep_num <= 1))
1980 return -EINVAL;
1981
1982 /* Don't halt an IN EP */
1983 if (ep->is_in)
1984 return -EAGAIN;
1985
1986 spin_lock_irqsave(&udc->lock, flags);
1987
1988 if (value == 1) {
1989 /* stall */
1990 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
1991 DAT_WR_BYTE(EP_STAT_ST));
1992 } else {
1993 /* End stall */
1994 ep->wedge = 0;
1995 udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
1996 DAT_WR_BYTE(0));
1997 }
1998
1999 spin_unlock_irqrestore(&udc->lock, flags);
2000
2001 return 0;
2002}
2003
2004/* Set the halt feature and ignore clear requests */
2005static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
2006{
2007 struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
2008
2009 if (!_ep || !ep->udc)
2010 return -EINVAL;
2011
2012 ep->wedge = 1;
2013
2014 return usb_ep_set_halt(_ep);
2015}
2016
2017static const struct usb_ep_ops lpc32xx_ep_ops = {
2018 .enable = lpc32xx_ep_enable,
2019 .disable = lpc32xx_ep_disable,
2020 .alloc_request = lpc32xx_ep_alloc_request,
2021 .free_request = lpc32xx_ep_free_request,
2022 .queue = lpc32xx_ep_queue,
2023 .dequeue = lpc32xx_ep_dequeue,
2024 .set_halt = lpc32xx_ep_set_halt,
2025 .set_wedge = lpc32xx_ep_set_wedge,
2026};
2027
2028/* Send a ZLP on a non-0 IN EP */
2029void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2030{
2031 /* Clear EP status */
2032 udc_clearep_getsts(udc, ep->hwep_num);
2033
2034 /* Send ZLP via FIFO mechanism */
2035 udc_write_hwep(udc, ep->hwep_num, NULL, 0);
2036}
2037
2038/*
2039 * Handle EP completion for ZLP
2040 * This function will only be called when a delayed ZLP needs to be sent out
2041 * after a DMA transfer has filled both buffers.
2042 */
2043void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2044{
2045 u32 epstatus;
2046 struct lpc32xx_request *req;
2047
2048 if (ep->hwep_num <= 0)
2049 return;
2050
2051 uda_clear_hwepint(udc, ep->hwep_num);
2052
2053 /* If this interrupt isn't enabled, return now */
2054 if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
2055 return;
2056
2057 /* Get endpoint status */
2058 epstatus = udc_clearep_getsts(udc, ep->hwep_num);
2059
2060 /*
2061 * This should never happen, but protect against writing to the
2062 * buffer when full.
2063 */
2064 if (epstatus & EP_SEL_F)
2065 return;
2066
2067 if (ep->is_in) {
2068 udc_send_in_zlp(udc, ep);
2069 uda_disable_hwepint(udc, ep->hwep_num);
2070 } else
2071 return;
2072
2073 /* If there isn't a request waiting, something went wrong */
2074 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
2075 if (req) {
2076 done(ep, req, 0);
2077
2078 /* Start another request if ready */
2079 if (!list_empty(&ep->queue)) {
2080 if (ep->is_in)
2081 udc_ep_in_req_dma(udc, ep);
2082 else
2083 udc_ep_out_req_dma(udc, ep);
2084 } else
2085 ep->req_pending = 0;
2086 }
2087}
2088
2089
2090/* DMA end of transfer completion */
2091static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2092{
2093 u32 status, epstatus;
2094 struct lpc32xx_request *req;
2095 struct lpc32xx_usbd_dd_gad *dd;
2096
2097#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2098 ep->totalints++;
2099#endif
2100
2101 req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
2102 if (!req) {
2103 ep_err(ep, "DMA interrupt on no req!\n");
2104 return;
2105 }
2106 dd = req->dd_desc_ptr;
2107
2108 /* DMA descriptor should always be retired for this call */
2109 if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
2110 ep_warn(ep, "DMA descriptor did not retire\n");
2111
2112 /* Disable DMA */
2113 udc_ep_dma_disable(udc, ep->hwep_num);
2114 writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
2115 writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));
2116
2117 /* System error? */
2118 if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
2119 (1 << ep->hwep_num)) {
2120 writel((1 << ep->hwep_num),
2121 USBD_SYSERRTINTCLR(udc->udp_baseaddr));
2122 ep_err(ep, "AHB critical error!\n");
2123 ep->req_pending = 0;
2124
2125 /* The error could have occurred on a packet of a multipacket
2126 * transfer, so recovering the transfer is not possible. Close
2127 * the request with an error */
2128 done(ep, req, -ECONNABORTED);
2129 return;
2130 }
2131
2132 /* Handle the current DD's status */
2133 status = dd->dd_status;
2134 switch (status & DD_STATUS_STS_MASK) {
2135 case DD_STATUS_STS_NS:
2136 /* DD not serviced? This shouldn't happen! */
2137 ep->req_pending = 0;
2138 ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
2139 status);
2140
2141 done(ep, req, -ECONNABORTED);
2142 return;
2143
2144 case DD_STATUS_STS_BS:
2145 /* Interrupt only fires on EOT - This shouldn't happen! */
2146 ep->req_pending = 0;
2147 ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
2148 status);
2149 done(ep, req, -ECONNABORTED);
2150 return;
2151
2152 case DD_STATUS_STS_NC:
2153 case DD_STATUS_STS_DUR:
2154 /* Really just a short packet, not an underrun */
2155 /* This is a good status and what we expect */
2156 break;
2157
2158 default:
2159 /* Data overrun, system error, or unknown */
2160 ep->req_pending = 0;
2161 ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
2162 status);
2163 done(ep, req, -ECONNABORTED);
2164 return;
2165 }
2166
2167 /* ISO endpoints are handled differently */
2168 if (ep->eptype == EP_ISO_TYPE) {
2169 if (ep->is_in)
2170 req->req.actual = req->req.length;
2171 else
2172 req->req.actual = dd->iso_status[0] & 0xFFFF;
2173 } else
2174 req->req.actual += DD_STATUS_CURDMACNT(status);
2175
2176	/* Send a ZLP if necessary. This will be done for non-int
2177	 * packets whose size is an exact multiple of the max packet size */
2178 if (req->send_zlp) {
2179 /*
2180 * If at least 1 buffer is available, send the ZLP now.
2181 * Otherwise, the ZLP send needs to be deferred until a
2182 * buffer is available.
2183 */
2184 if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
2185 udc_clearep_getsts(udc, ep->hwep_num);
2186 uda_enable_hwepint(udc, ep->hwep_num);
2187 epstatus = udc_clearep_getsts(udc, ep->hwep_num);
2188
2189 /* Let the EP interrupt handle the ZLP */
2190 return;
2191 } else
2192 udc_send_in_zlp(udc, ep);
2193 }
2194
2195 /* Transfer request is complete */
2196 done(ep, req, 0);
2197
2198 /* Start another request if ready */
2199 udc_clearep_getsts(udc, ep->hwep_num);
2200 if (!list_empty((&ep->queue))) {
2201 if (ep->is_in)
2202 udc_ep_in_req_dma(udc, ep);
2203 else
2204 udc_ep_out_req_dma(udc, ep);
2205 } else
2206 ep->req_pending = 0;
2207
2208}
2209
2210/*
2211 *
2212 * Endpoint 0 functions
2213 *
2214 */
2215static void udc_handle_dev(struct lpc32xx_udc *udc)
2216{
2217 u32 tmp;
2218
2219 udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
2220 tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
2221
2222 if (tmp & DEV_RST)
2223 uda_usb_reset(udc);
2224 else if (tmp & DEV_CON_CH)
2225 uda_power_event(udc, (tmp & DEV_CON));
2226 else if (tmp & DEV_SUS_CH) {
2227 if (tmp & DEV_SUS) {
2228 if (udc->vbus == 0)
2229 stop_activity(udc);
2230 else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
2231 udc->driver) {
2232 /* Power down transceiver */
2233 udc->poweron = 0;
2234 schedule_work(&udc->pullup_job);
2235 uda_resm_susp_event(udc, 1);
2236 }
2237 } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
2238 udc->driver && udc->vbus) {
2239 uda_resm_susp_event(udc, 0);
2240 /* Power up transceiver */
2241 udc->poweron = 1;
2242 schedule_work(&udc->pullup_job);
2243 }
2244 }
2245}
2246
2247static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
2248{
2249 struct lpc32xx_ep *ep;
2250 u32 ep0buff = 0, tmp;
2251
2252 switch (reqtype & USB_RECIP_MASK) {
2253 case USB_RECIP_INTERFACE:
2254 break; /* Not supported */
2255
2256 case USB_RECIP_DEVICE:
2257 ep0buff = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
2258 if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
2259 ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
2260 break;
2261
2262 case USB_RECIP_ENDPOINT:
2263 tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
2264 ep = &udc->ep[tmp];
2265 if ((tmp == 0) || (tmp >= NUM_ENDPOINTS) || (tmp && !ep->desc))
2266 return -EOPNOTSUPP;
2267
2268 if (wIndex & USB_DIR_IN) {
2269 if (!ep->is_in)
2270 return -EOPNOTSUPP; /* Something's wrong */
2271 } else if (ep->is_in)
2272 return -EOPNOTSUPP; /* Not an IN endpoint */
2273
2274 /* Get status of the endpoint */
2275 udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
2276 tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
2277
2278 if (tmp & EP_SEL_ST)
2279 ep0buff = (1 << USB_ENDPOINT_HALT);
2280 else
2281 ep0buff = 0;
2282 break;
2283
2284 default:
2285 break;
2286 }
2287
2288 /* Return data */
2289 udc_write_hwep(udc, EP_IN, &ep0buff, 2);
2290
2291 return 0;
2292}
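
/*
 * Note on the reply written above: GET_STATUS always returns a two-byte
 * status word. For the device recipient, bit 0 reports self-powered and
 * bit 1 reports remote-wakeup enable; for an endpoint recipient, bit 0
 * reports the halt state, derived here from the hardware EP_SEL_ST flag.
 */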
2293
2294static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
2295{
2296 struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
2297 struct usb_ctrlrequest ctrlpkt;
2298 int i, bytes;
2299 u16 wIndex, wValue, wLength, reqtype, req, tmp;
2300
2301 /* Nuke previous transfers */
2302 nuke(ep0, -EPROTO);
2303
2304 /* Get setup packet */
2305 bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
2306 if (bytes != 8) {
2307 ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
2308 bytes);
2309 return;
2310 }
2311
2312 /* Native endianness */
2313 wIndex = le16_to_cpu(ctrlpkt.wIndex);
2314 wValue = le16_to_cpu(ctrlpkt.wValue);
2315 wLength = le16_to_cpu(ctrlpkt.wLength);
2316 reqtype = le16_to_cpu(ctrlpkt.bRequestType);
2317
2318 /* Set direction of EP0 */
2319 if (likely(reqtype & USB_DIR_IN))
2320 ep0->is_in = 1;
2321 else
2322 ep0->is_in = 0;
2323
2324 /* Handle SETUP packet */
2325 req = le16_to_cpu(ctrlpkt.bRequest);
2326 switch (req) {
2327 case USB_REQ_CLEAR_FEATURE:
2328 case USB_REQ_SET_FEATURE:
2329 switch (reqtype) {
2330 case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2331 if (wValue != USB_DEVICE_REMOTE_WAKEUP)
2332 goto stall; /* Nothing else handled */
2333
2334 /* Tell board about event */
2335 if (req == USB_REQ_CLEAR_FEATURE)
2336 udc->dev_status &=
2337 ~(1 << USB_DEVICE_REMOTE_WAKEUP);
2338 else
2339 udc->dev_status |=
2340 (1 << USB_DEVICE_REMOTE_WAKEUP);
2341 uda_remwkp_cgh(udc);
2342 goto zlp_send;
2343
2344 case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2345 tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
2346 if ((wValue != USB_ENDPOINT_HALT) ||
2347 (tmp >= NUM_ENDPOINTS))
2348 break;
2349
2350 /* Find hardware endpoint from logical endpoint */
2351 ep = &udc->ep[tmp];
2352 tmp = ep->hwep_num;
2353 if (tmp == 0)
2354 break;
2355
2356 if (req == USB_REQ_SET_FEATURE)
2357 udc_stall_hwep(udc, tmp);
2358 else if (!ep->wedge)
2359 udc_clrstall_hwep(udc, tmp);
2360
2361 goto zlp_send;
2362
2363 default:
2364 break;
2365 }
2366
2367
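	/* Fall through: feature requests not handled above drop through
	 * the remaining cases and end up at the gadget driver's setup()
	 * callback further below */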
2368 case USB_REQ_SET_ADDRESS:
2369 if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
2370 udc_set_address(udc, wValue);
2371 goto zlp_send;
2372 }
2373 break;
2374
2375 case USB_REQ_GET_STATUS:
2376 udc_get_status(udc, reqtype, wIndex);
2377 return;
2378
2379 default:
2380 break; /* Let GadgetFS handle the descriptor instead */
2381 }
2382
2383 if (likely(udc->driver)) {
2384 /* device-2-host (IN) or no data setup command, process
2385 * immediately */
2386 spin_unlock(&udc->lock);
2387 i = udc->driver->setup(&udc->gadget, &ctrlpkt);
2388
2389 spin_lock(&udc->lock);
2390 if (req == USB_REQ_SET_CONFIGURATION) {
2391 /* Configuration is set after endpoints are realized */
2392 if (wValue) {
2393 /* Set configuration */
2394 udc_set_device_configured(udc);
2395
2396 udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
2397 DAT_WR_BYTE(AP_CLK |
2398 INAK_BI | INAK_II));
2399 } else {
2400 /* Clear configuration */
2401 udc_set_device_unconfigured(udc);
2402
2403 /* Disable NAK interrupts */
2404 udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
2405 DAT_WR_BYTE(AP_CLK));
2406 }
2407 }
2408
2409 if (i < 0) {
2410 /* setup processing failed, force stall */
2411 dev_err(udc->dev,
2412 "req %02x.%02x protocol STALL; stat %d\n",
2413 reqtype, req, i);
2414 udc->ep0state = WAIT_FOR_SETUP;
2415 goto stall;
2416 }
2417 }
2418
2419 if (!ep0->is_in)
2420 udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
2421
2422 return;
2423
2424stall:
2425 udc_stall_hwep(udc, EP_IN);
2426 return;
2427
2428zlp_send:
2429 udc_ep0_send_zlp(udc);
2430 return;
2431}
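
/*
 * EP0 flow in brief: a SETUP packet parsed above either gets answered
 * directly (address, status and feature handling), is passed to the gadget
 * driver's setup() callback, or forces a stall. Requests with a data stage
 * move udc->ep0state to DATA_IN or DATA_OUT via lpc32xx_ep_queue() on ep0,
 * and the status stage is the zero-length packet sent by
 * udc_ep0_send_zlp(); any error returns the state to WAIT_FOR_SETUP.
 */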
2432
2433/* IN endpoint 0 transfer */
2434static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
2435{
2436 struct lpc32xx_ep *ep0 = &udc->ep[0];
2437 u32 epstatus;
2438
2439 /* Clear EP interrupt */
2440 epstatus = udc_clearep_getsts(udc, EP_IN);
2441
2442#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2443 ep0->totalints++;
2444#endif
2445
2446 /* Stalled? Clear stall and reset buffers */
2447 if (epstatus & EP_SEL_ST) {
2448 udc_clrstall_hwep(udc, EP_IN);
2449 nuke(ep0, -ECONNABORTED);
2450 udc->ep0state = WAIT_FOR_SETUP;
2451 return;
2452 }
2453
2454 /* Is a buffer available? */
2455 if (!(epstatus & EP_SEL_F)) {
2456 /* Handle based on current state */
2457 if (udc->ep0state == DATA_IN)
2458 udc_ep0_in_req(udc);
2459 else {
2460			/* Unknown state for EP0 or end of DATA IN phase */
2461 nuke(ep0, -ECONNABORTED);
2462 udc->ep0state = WAIT_FOR_SETUP;
2463 }
2464 }
2465}
2466
2467/* OUT endpoint 0 transfer */
2468static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
2469{
2470 struct lpc32xx_ep *ep0 = &udc->ep[0];
2471 u32 epstatus;
2472
2473 /* Clear EP interrupt */
2474 epstatus = udc_clearep_getsts(udc, EP_OUT);
2475
2476
2477#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2478 ep0->totalints++;
2479#endif
2480
2481 /* Stalled? */
2482 if (epstatus & EP_SEL_ST) {
2483 udc_clrstall_hwep(udc, EP_OUT);
2484 nuke(ep0, -ECONNABORTED);
2485 udc->ep0state = WAIT_FOR_SETUP;
2486 return;
2487 }
2488
2489 /* A NAK may occur if a packet couldn't be received yet */
2490 if (epstatus & EP_SEL_EPN)
2491 return;
2492 /* Setup packet incoming? */
2493 if (epstatus & EP_SEL_STP) {
2494 nuke(ep0, 0);
2495 udc->ep0state = WAIT_FOR_SETUP;
2496 }
2497
2498 /* Data available? */
2499 if (epstatus & EP_SEL_F)
2500 /* Handle based on current state */
2501 switch (udc->ep0state) {
2502 case WAIT_FOR_SETUP:
2503 udc_handle_ep0_setup(udc);
2504 break;
2505
2506 case DATA_OUT:
2507 udc_ep0_out_req(udc);
2508 break;
2509
2510 default:
2511 /* Unknown state for EP0 */
2512 nuke(ep0, -ECONNABORTED);
2513 udc->ep0state = WAIT_FOR_SETUP;
2514 }
2515}
2516
2517/* Must be called without lock */
2518static int lpc32xx_get_frame(struct usb_gadget *gadget)
2519{
2520 int frame;
2521 unsigned long flags;
2522 struct lpc32xx_udc *udc = to_udc(gadget);
2523
2524 if (!udc->clocked)
2525 return -EINVAL;
2526
2527 spin_lock_irqsave(&udc->lock, flags);
2528
2529 frame = (int) udc_get_current_frame(udc);
2530
2531 spin_unlock_irqrestore(&udc->lock, flags);
2532
2533 return frame;
2534}
2535
2536static int lpc32xx_wakeup(struct usb_gadget *gadget)
2537{
2538 return -ENOTSUPP;
2539}
2540
2541static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
2542{
2543 struct lpc32xx_udc *udc = to_udc(gadget);
2544
2545 /* Always self-powered */
2546 udc->selfpowered = (is_on != 0);
2547
2548 return 0;
2549}
2550
2551/*
2552 * vbus is here! turn everything on that's ready
2553 * Must be called without lock
2554 */
2555static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
2556{
2557 unsigned long flags;
2558 struct lpc32xx_udc *udc = to_udc(gadget);
2559
2560 spin_lock_irqsave(&udc->lock, flags);
2561
2562 /* Doesn't need lock */
2563 if (udc->driver) {
2564 udc_clk_set(udc, 1);
2565 udc_enable(udc);
2566 pullup(udc, is_active);
2567 } else {
2568 stop_activity(udc);
2569 pullup(udc, 0);
2570
2571 spin_unlock_irqrestore(&udc->lock, flags);
2572 /*
2573 * Wait for all the endpoints to disable,
2574 * before disabling clocks. Don't wait if
2575 * endpoints are not enabled.
2576 */
2577 if (atomic_read(&udc->enabled_ep_cnt))
2578 wait_event_interruptible(udc->ep_disable_wait_queue,
2579 (atomic_read(&udc->enabled_ep_cnt) == 0));
2580
2581 spin_lock_irqsave(&udc->lock, flags);
2582
2583 udc_clk_set(udc, 0);
2584 }
2585
2586 spin_unlock_irqrestore(&udc->lock, flags);
2587
2588 return 0;
2589}
2590
2591/* Can be called with or without lock */
2592static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
2593{
2594 struct lpc32xx_udc *udc = to_udc(gadget);
2595
2596 /* Doesn't need lock */
2597 pullup(udc, is_on);
2598
2599 return 0;
2600}
2601
2602static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
2603static int lpc32xx_stop(struct usb_gadget *, struct usb_gadget_driver *);
2604
2605static const struct usb_gadget_ops lpc32xx_udc_ops = {
2606 .get_frame = lpc32xx_get_frame,
2607 .wakeup = lpc32xx_wakeup,
2608 .set_selfpowered = lpc32xx_set_selfpowered,
2609 .vbus_session = lpc32xx_vbus_session,
2610 .pullup = lpc32xx_pullup,
2611	.udc_start = lpc32xx_start,
2612	.udc_stop = lpc32xx_stop,
2613};
2614
2615static void nop_release(struct device *dev)
2616{
2617 /* nothing to free */
2618}
2619
2620static struct lpc32xx_udc controller = {
2621 .gadget = {
2622 .ops = &lpc32xx_udc_ops,
2623 .ep0 = &controller.ep[0].ep,
2624 .name = driver_name,
2625 .dev = {
2626 .init_name = "gadget",
2627 .release = nop_release,
2628 }
2629 },
2630 .ep[0] = {
2631 .ep = {
2632 .name = "ep0",
2633 .ops = &lpc32xx_ep_ops,
2634 },
2635 .udc = &controller,
2636 .maxpacket = 64,
2637 .hwep_num_base = 0,
2638 .hwep_num = 0, /* Can be 0 or 1, has special handling */
2639 .lep = 0,
2640 .eptype = EP_CTL_TYPE,
2641 },
2642 .ep[1] = {
2643 .ep = {
2644 .name = "ep1-int",
2645 .ops = &lpc32xx_ep_ops,
2646 },
2647 .udc = &controller,
2648 .maxpacket = 64,
2649 .hwep_num_base = 2,
2650 .hwep_num = 0, /* 2 or 3, will be set later */
2651 .lep = 1,
2652 .eptype = EP_INT_TYPE,
2653 },
2654 .ep[2] = {
2655 .ep = {
2656 .name = "ep2-bulk",
2657 .ops = &lpc32xx_ep_ops,
2658 },
2659 .udc = &controller,
2660 .maxpacket = 64,
2661 .hwep_num_base = 4,
2662 .hwep_num = 0, /* 4 or 5, will be set later */
2663 .lep = 2,
2664 .eptype = EP_BLK_TYPE,
2665 },
2666 .ep[3] = {
2667 .ep = {
2668 .name = "ep3-iso",
2669 .ops = &lpc32xx_ep_ops,
2670 },
2671 .udc = &controller,
2672 .maxpacket = 1023,
2673 .hwep_num_base = 6,
2674 .hwep_num = 0, /* 6 or 7, will be set later */
2675 .lep = 3,
2676 .eptype = EP_ISO_TYPE,
2677 },
2678 .ep[4] = {
2679 .ep = {
2680 .name = "ep4-int",
2681 .ops = &lpc32xx_ep_ops,
2682 },
2683 .udc = &controller,
2684 .maxpacket = 64,
2685 .hwep_num_base = 8,
2686 .hwep_num = 0, /* 8 or 9, will be set later */
2687 .lep = 4,
2688 .eptype = EP_INT_TYPE,
2689 },
2690 .ep[5] = {
2691 .ep = {
2692 .name = "ep5-bulk",
2693 .ops = &lpc32xx_ep_ops,
2694 },
2695 .udc = &controller,
2696 .maxpacket = 64,
2697 .hwep_num_base = 10,
2698 .hwep_num = 0, /* 10 or 11, will be set later */
2699 .lep = 5,
2700 .eptype = EP_BLK_TYPE,
2701 },
2702 .ep[6] = {
2703 .ep = {
2704 .name = "ep6-iso",
2705 .ops = &lpc32xx_ep_ops,
2706 },
2707 .udc = &controller,
2708 .maxpacket = 1023,
2709 .hwep_num_base = 12,
2710 .hwep_num = 0, /* 12 or 13, will be set later */
2711 .lep = 6,
2712 .eptype = EP_ISO_TYPE,
2713 },
2714 .ep[7] = {
2715 .ep = {
2716 .name = "ep7-int",
2717 .ops = &lpc32xx_ep_ops,
2718 },
2719 .udc = &controller,
2720 .maxpacket = 64,
2721 .hwep_num_base = 14,
2722 .hwep_num = 0,
2723 .lep = 7,
2724 .eptype = EP_INT_TYPE,
2725 },
2726 .ep[8] = {
2727 .ep = {
2728 .name = "ep8-bulk",
2729 .ops = &lpc32xx_ep_ops,
2730 },
2731 .udc = &controller,
2732 .maxpacket = 64,
2733 .hwep_num_base = 16,
2734 .hwep_num = 0,
2735 .lep = 8,
2736 .eptype = EP_BLK_TYPE,
2737 },
2738 .ep[9] = {
2739 .ep = {
2740 .name = "ep9-iso",
2741 .ops = &lpc32xx_ep_ops,
2742 },
2743 .udc = &controller,
2744 .maxpacket = 1023,
2745 .hwep_num_base = 18,
2746 .hwep_num = 0,
2747 .lep = 9,
2748 .eptype = EP_ISO_TYPE,
2749 },
2750 .ep[10] = {
2751 .ep = {
2752 .name = "ep10-int",
2753 .ops = &lpc32xx_ep_ops,
2754 },
2755 .udc = &controller,
2756 .maxpacket = 64,
2757 .hwep_num_base = 20,
2758 .hwep_num = 0,
2759 .lep = 10,
2760 .eptype = EP_INT_TYPE,
2761 },
2762 .ep[11] = {
2763 .ep = {
2764 .name = "ep11-bulk",
2765 .ops = &lpc32xx_ep_ops,
2766 },
2767 .udc = &controller,
2768 .maxpacket = 64,
2769 .hwep_num_base = 22,
2770 .hwep_num = 0,
2771 .lep = 11,
2772 .eptype = EP_BLK_TYPE,
2773 },
2774 .ep[12] = {
2775 .ep = {
2776 .name = "ep12-iso",
2777 .ops = &lpc32xx_ep_ops,
2778 },
2779 .udc = &controller,
2780 .maxpacket = 1023,
2781 .hwep_num_base = 24,
2782 .hwep_num = 0,
2783 .lep = 12,
2784 .eptype = EP_ISO_TYPE,
2785 },
2786 .ep[13] = {
2787 .ep = {
2788 .name = "ep13-int",
2789 .ops = &lpc32xx_ep_ops,
2790 },
2791 .udc = &controller,
2792 .maxpacket = 64,
2793 .hwep_num_base = 26,
2794 .hwep_num = 0,
2795 .lep = 13,
2796 .eptype = EP_INT_TYPE,
2797 },
2798 .ep[14] = {
2799 .ep = {
2800 .name = "ep14-bulk",
2801 .ops = &lpc32xx_ep_ops,
2802 },
2803 .udc = &controller,
2804 .maxpacket = 64,
2805 .hwep_num_base = 28,
2806 .hwep_num = 0,
2807 .lep = 14,
2808 .eptype = EP_BLK_TYPE,
2809 },
2810 .ep[15] = {
2811 .ep = {
2812 .name = "ep15-bulk",
2813 .ops = &lpc32xx_ep_ops,
2814 },
2815 .udc = &controller,
2816 .maxpacket = 1023,
2817 .hwep_num_base = 30,
2818 .hwep_num = 0,
2819 .lep = 15,
2820 .eptype = EP_BLK_TYPE,
2821 },
2822};
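
/*
 * Mapping between the logical endpoints above and the 32 hardware
 * endpoints: logical endpoint N uses hardware endpoints 2*N (OUT) and
 * 2*N + 1 (IN), which is why each hwep_num_base is twice the lep value.
 * The final hwep_num is filled in by lpc32xx_ep_enable() from the
 * direction bit of the endpoint descriptor.
 */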
2823
2824/* ISO and status interrupts */
2825static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
2826{
2827 u32 tmp, devstat;
2828 struct lpc32xx_udc *udc = _udc;
2829
2830 spin_lock(&udc->lock);
2831
2832 /* Read the device status register */
2833 devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
2834
2835 devstat &= ~USBD_EP_FAST;
2836 writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
2837 devstat = devstat & udc->enabled_devints;
2838
2839 /* Device specific handling needed? */
2840 if (devstat & USBD_DEV_STAT)
2841 udc_handle_dev(udc);
2842
2843 /* Start of frame? (devstat & FRAME_INT):
2844 * The frame interrupt isn't really needed for ISO support,
2845 * as the driver will queue the necessary packets */
2846
2847 /* Error? */
2848 if (devstat & ERR_INT) {
2849 /* All types of errors, from cable removal during transfer to
2850		 * misc protocol and bit errors. These are mostly informational,
2851		 * as the USB hardware will work around these. If these errors
2852		 * happen a lot, something is wrong. */
2853 udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
2854 tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
2855 dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
2856 }
2857
2858 spin_unlock(&udc->lock);
2859
2860 return IRQ_HANDLED;
2861}
2862
2863/* EP interrupts */
2864static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
2865{
2866 u32 tmp;
2867 struct lpc32xx_udc *udc = _udc;
2868
2869 spin_lock(&udc->lock);
2870
2871 /* Read the device status register */
2872 writel(USBD_EP_FAST, USBD_DEVINTCLR(udc->udp_baseaddr));
2873
2874 /* Endpoints */
2875 tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
2876
2877 /* Special handling for EP0 */
2878 if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
2879 /* Handle EP0 IN */
2880 if (tmp & (EP_MASK_SEL(0, EP_IN)))
2881 udc_handle_ep0_in(udc);
2882
2883 /* Handle EP0 OUT */
2884 if (tmp & (EP_MASK_SEL(0, EP_OUT)))
2885 udc_handle_ep0_out(udc);
2886 }
2887
2888 /* All other EPs */
2889 if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
2890 int i;
2891
2892 /* Handle other EP interrupts */
2893 for (i = 1; i < NUM_ENDPOINTS; i++) {
2894 if (tmp & (1 << udc->ep[i].hwep_num))
2895 udc_handle_eps(udc, &udc->ep[i]);
2896 }
2897 }
2898
2899 spin_unlock(&udc->lock);
2900
2901 return IRQ_HANDLED;
2902}
2903
2904static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
2905{
2906 struct lpc32xx_udc *udc = _udc;
2907
2908 int i;
2909 u32 tmp;
2910
2911 spin_lock(&udc->lock);
2912
2913 /* Handle EP DMA EOT interrupts */
2914 tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
2915 (readl(USBD_EPDMAST(udc->udp_baseaddr)) &
2916 readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
2917 readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
2918 for (i = 1; i < NUM_ENDPOINTS; i++) {
2919 if (tmp & (1 << udc->ep[i].hwep_num))
2920 udc_handle_dma_ep(udc, &udc->ep[i]);
2921 }
2922
2923 spin_unlock(&udc->lock);
2924
2925 return IRQ_HANDLED;
2926}
2927
2928/*
2929 *
2930 * VBUS detection, pullup handler, and Gadget cable state notification
2931 *
2932 */
2933static void vbus_work(struct work_struct *work)
2934{
2935 u8 value;
2936 struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
2937 vbus_job);
2938
2939 if (udc->enabled != 0) {
2940 /* Discharge VBUS real quick */
2941 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2942 ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
2943
2944		/* Give VBUS some time (100 ms) to discharge */
2945 msleep(100);
2946
2947 /* Disable VBUS discharge resistor */
2948 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2949 ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
2950 OTG1_VBUS_DISCHRG);
2951
2952 /* Clear interrupt */
2953 i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2954 ISP1301_I2C_INTERRUPT_LATCH |
2955 ISP1301_I2C_REG_CLEAR_ADDR, ~0);
2956
2957 /* Get the VBUS status from the transceiver */
2958 value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
2959 ISP1301_I2C_OTG_CONTROL_2);
2960
2961 /* VBUS on or off? */
2962 if (value & OTG_B_SESS_VLD)
2963 udc->vbus = 1;
2964 else
2965 udc->vbus = 0;
2966
2967 /* VBUS changed? */
2968 if (udc->last_vbus != udc->vbus) {
2969 udc->last_vbus = udc->vbus;
2970 lpc32xx_vbus_session(&udc->gadget, udc->vbus);
2971 }
2972 }
2973
2974 /* Re-enable after completion */
2975 enable_irq(udc->udp_irq[IRQ_USB_ATX]);
2976}
2977
2978static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
2979{
2980 struct lpc32xx_udc *udc = _udc;
2981
2982 /* Defer handling of VBUS IRQ to work queue */
2983 disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
2984 schedule_work(&udc->vbus_job);
2985
2986 return IRQ_HANDLED;
2987}
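
/*
 * The VBUS interrupt is deferred to vbus_work() because the ISP1301
 * transceiver is only reachable over I2C and the SMBus accessors may
 * sleep; the hard IRQ handler therefore just masks its own line and lets
 * the work item re-enable it once the cable state has been processed.
 */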
2988
2989static int lpc32xx_start(struct usb_gadget *gadget,
2990			 struct usb_gadget_driver *driver)
2991{
2992	struct lpc32xx_udc *udc = to_udc(gadget);
2993	int i;
2994
2995	if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) {
2996		dev_err(udc->dev, "bad parameter.\n");
2997 return -EINVAL;
2998 }
2999
3000 if (udc->driver) {
3001 dev_err(udc->dev, "UDC already has a gadget driver\n");
3002 return -EBUSY;
3003 }
3004
3005 udc->driver = driver;
3006 udc->gadget.dev.driver = &driver->driver;
3007	udc->gadget.dev.of_node = udc->dev->of_node;
3008	udc->enabled = 1;
3009 udc->selfpowered = 1;
3010 udc->vbus = 0;
3011
3012	/* Force VBUS process once to check for cable insertion */
3013 udc->last_vbus = udc->vbus = 0;
3014 schedule_work(&udc->vbus_job);
3015
3016 /* Do not re-enable ATX IRQ (3) */
3017 for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
3018 enable_irq(udc->udp_irq[i]);
3019
3020 return 0;
3021}
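
/*
 * Note: lpc32xx_start()/lpc32xx_stop() are not called directly; they are
 * the udc_start/udc_stop hooks invoked by the UDC core when a gadget
 * driver binds to or unbinds from the controller registered with
 * usb_add_gadget_udc() in the probe routine below.
 */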
3022
3023static int lpc32xx_stop(struct usb_gadget *gadget,
3024			struct usb_gadget_driver *driver)
3025{
3026	int i;
3027	struct lpc32xx_udc *udc = to_udc(gadget);
3028
3029	if (!driver || driver != udc->driver)
3030		return -EINVAL;
3031
3032	for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
3033 disable_irq(udc->udp_irq[i]);
3034
3035 if (udc->clocked) {
3036		spin_lock(&udc->lock);
3037 stop_activity(udc);
3038 spin_unlock(&udc->lock);
3039
3040 /*
3041 * Wait for all the endpoints to disable,
3042 * before disabling clocks. Don't wait if
3043 * endpoints are not enabled.
3044 */
3045 if (atomic_read(&udc->enabled_ep_cnt))
3046 wait_event_interruptible(udc->ep_disable_wait_queue,
3047 (atomic_read(&udc->enabled_ep_cnt) == 0));
3048
3049 spin_lock(&udc->lock);
3050 udc_clk_set(udc, 0);
3051 spin_unlock(&udc->lock);
3052 }
3053
3054 udc->enabled = 0;
3055	udc->gadget.dev.driver = NULL;
3056 udc->driver = NULL;
3057
3058	return 0;
3059}
3060
3061static void lpc32xx_udc_shutdown(struct platform_device *dev)
3062{
3063 /* Force disconnect on reboot */
3064	struct lpc32xx_udc *udc = platform_get_drvdata(dev);
3065
3066 pullup(udc, 0);
3067}
3068
3069/*
3070 * Callbacks to be overridden by options passed via OF (TODO)
3071 */
3072
3073static void lpc32xx_usbd_conn_chg(int conn)
3074{
3075 /* Do nothing, it might be nice to enable an LED
3076 * based on conn state being !0 */
3077}
3078
3079static void lpc32xx_usbd_susp_chg(int susp)
3080{
3081 /* Device suspend if susp != 0 */
3082}
3083
3084static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
3085{
3086 /* Enable or disable USB remote wakeup */
3087}
3088
3089struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
3090 .vbus_drv_pol = 0,
3091 .conn_chgb = &lpc32xx_usbd_conn_chg,
3092 .susp_chgb = &lpc32xx_usbd_susp_chg,
3093 .rmwk_chgb = &lpc32xx_rmwkup_chg,
3094};
3095
3096
3097static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
3098
3099static int __init lpc32xx_udc_probe(struct platform_device *pdev)
3100{
3101 struct device *dev = &pdev->dev;
3102 struct lpc32xx_udc *udc = &controller;
3103 int retval, i;
3104 struct resource *res;
3105 dma_addr_t dma_handle;
3106 struct device_node *isp1301_node;
3107
3108 /* init software state */
3109 udc->gadget.dev.parent = dev;
3110 udc->pdev = pdev;
3111 udc->dev = &pdev->dev;
3112 udc->enabled = 0;
3113
3114 if (pdev->dev.of_node) {
3115 isp1301_node = of_parse_phandle(pdev->dev.of_node,
3116 "transceiver", 0);
3117 } else {
3118 isp1301_node = NULL;
3119 }
3120
3121 udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
3122 if (!udc->isp1301_i2c_client)
3123 return -EPROBE_DEFER;
3124
3125 dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
3126 udc->isp1301_i2c_client->addr);
3127
3128 pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
3129 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
3130
3131 udc->board = &lpc32xx_usbddata;
3132
3133 /*
3134 * Resources are mapped as follows:
3135 * IORESOURCE_MEM, base address and size of USB space
3136 * IORESOURCE_IRQ, USB device low priority interrupt number
3137 * IORESOURCE_IRQ, USB device high priority interrupt number
3138 * IORESOURCE_IRQ, USB device interrupt number
3139 * IORESOURCE_IRQ, USB transceiver interrupt number
3140 */
3141 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3142 if (!res)
3143 return -ENXIO;
3144
3145 spin_lock_init(&udc->lock);
3146
3147 /* Get IRQs */
3148 for (i = 0; i < 4; i++) {
3149 udc->udp_irq[i] = platform_get_irq(pdev, i);
3150 if (udc->udp_irq[i] < 0) {
3151 dev_err(udc->dev,
3152 "irq resource %d not available!\n", i);
3153 return udc->udp_irq[i];
3154 }
3155 }
3156
3157 udc->io_p_start = res->start;
3158 udc->io_p_size = resource_size(res);
3159 if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) {
3160 dev_err(udc->dev, "someone's using UDC memory\n");
3161 return -EBUSY;
3162 }
3163
3164 udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
3165 if (!udc->udp_baseaddr) {
3166 retval = -ENOMEM;
3167 dev_err(udc->dev, "IO map failure\n");
3168 goto io_map_fail;
3169 }
3170
3171 /* Enable AHB slave USB clock, needed for further USB clock control */
3172 writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
3173
3174 /* Get required clocks */
3175 udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
3176 if (IS_ERR(udc->usb_pll_clk)) {
3177 dev_err(udc->dev, "failed to acquire USB PLL\n");
3178 retval = PTR_ERR(udc->usb_pll_clk);
3179 goto pll_get_fail;
3180 }
3181 udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
3182 if (IS_ERR(udc->usb_slv_clk)) {
3183 dev_err(udc->dev, "failed to acquire USB device clock\n");
3184 retval = PTR_ERR(udc->usb_slv_clk);
3185 goto usb_clk_get_fail;
3186 }
3187	udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
3188 if (IS_ERR(udc->usb_otg_clk)) {
3189 dev_err(udc->dev, "failed to acquire USB otg clock\n");
3190 retval = PTR_ERR(udc->usb_slv_clk);
3191 goto usb_otg_clk_get_fail;
3192 }
3193
3194 /* Setup PLL clock to 48MHz */
3195 retval = clk_enable(udc->usb_pll_clk);
3196 if (retval < 0) {
3197 dev_err(udc->dev, "failed to start USB PLL\n");
3198 goto pll_enable_fail;
3199 }
3200
3201 retval = clk_set_rate(udc->usb_pll_clk, 48000);
3202 if (retval < 0) {
3203 dev_err(udc->dev, "failed to set USB clock rate\n");
3204 goto pll_set_fail;
3205 }
3206
3207 writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
3208
3209 /* Enable USB device clock */
3210 retval = clk_enable(udc->usb_slv_clk);
3211 if (retval < 0) {
3212 dev_err(udc->dev, "failed to start USB device clock\n");
3213 goto usb_clk_enable_fail;
3214 }
3215
3216	/* Enable USB OTG clock */
3217 retval = clk_enable(udc->usb_otg_clk);
3218 if (retval < 0) {
3219 dev_err(udc->dev, "failed to start USB otg clock\n");
3220 goto usb_otg_clk_enable_fail;
3221 }
3222
3223 /* Setup deferred workqueue data */
3224 udc->poweron = udc->pullup = 0;
3225 INIT_WORK(&udc->pullup_job, pullup_work);
3226 INIT_WORK(&udc->vbus_job, vbus_work);
3227#ifdef CONFIG_PM
3228 INIT_WORK(&udc->power_job, power_work);
3229#endif
3230
3231 /* All clocks are now on */
3232 udc->clocked = 1;
3233
3234 isp1301_udc_configure(udc);
3235 /* Allocate memory for the UDCA */
3236 udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3237 &dma_handle,
3238 (GFP_KERNEL | GFP_DMA));
3239 if (!udc->udca_v_base) {
3240 dev_err(udc->dev, "error getting UDCA region\n");
3241 retval = -ENOMEM;
3242 goto i2c_fail;
3243 }
3244 udc->udca_p_base = dma_handle;
3245 dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
3246 UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
3247
3248 /* Setup the DD DMA memory pool */
3249 udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
3250 sizeof(struct lpc32xx_usbd_dd_gad),
3251 sizeof(u32), 0);
3252 if (!udc->dd_cache) {
3253 dev_err(udc->dev, "error getting DD DMA region\n");
3254 retval = -ENOMEM;
3255 goto dma_alloc_fail;
3256 }
3257
3258 /* Clear USB peripheral and initialize gadget endpoints */
3259 udc_disable(udc);
3260 udc_reinit(udc);
3261
3262 retval = device_register(&udc->gadget.dev);
3263 if (retval < 0) {
3264 dev_err(udc->dev, "Device registration failure\n");
3265 goto dev_register_fail;
3266 }
3267
3268	/* Request IRQs - the low and high priority USB device IRQs each get
3269	 * their own handler, while the DMA interrupt goes to a third one */
3270 retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
3271 0, "udc_lp", udc);
3272 if (retval < 0) {
3273 dev_err(udc->dev, "LP request irq %d failed\n",
3274 udc->udp_irq[IRQ_USB_LP]);
3275 goto irq_lp_fail;
3276 }
3277 retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq,
3278 0, "udc_hp", udc);
3279 if (retval < 0) {
3280 dev_err(udc->dev, "HP request irq %d failed\n",
3281 udc->udp_irq[IRQ_USB_HP]);
3282 goto irq_hp_fail;
3283 }
3284
3285 retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA],
3286 lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
3287 if (retval < 0) {
3288 dev_err(udc->dev, "DEV request irq %d failed\n",
3289 udc->udp_irq[IRQ_USB_DEVDMA]);
3290 goto irq_dev_fail;
3291 }
3292
3293 /* The transceiver interrupt is used for VBUS detection and will
3294 kick off the VBUS handler function */
3295 retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq,
3296 0, "udc_otg", udc);
3297 if (retval < 0) {
3298 dev_err(udc->dev, "VBUS request irq %d failed\n",
3299 udc->udp_irq[IRQ_USB_ATX]);
3300 goto irq_xcvr_fail;
3301 }
3302
3303 /* Initialize wait queue */
3304 init_waitqueue_head(&udc->ep_disable_wait_queue);
3305 atomic_set(&udc->enabled_ep_cnt, 0);
3306
3307 /* Keep all IRQs disabled until GadgetFS starts up */
3308 for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
3309 disable_irq(udc->udp_irq[i]);
3310
3311 retval = usb_add_gadget_udc(dev, &udc->gadget);
3312 if (retval < 0)
3313 goto add_gadget_fail;
3314
3315 dev_set_drvdata(dev, udc);
3316 device_init_wakeup(dev, 1);
3317 create_debug_file(udc);
3318
3319 /* Disable clocks for now */
3320 udc_clk_set(udc, 0);
3321
3322 dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
3323 return 0;
3324
3325add_gadget_fail:
3326 free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
3327irq_xcvr_fail:
3328 free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
3329irq_dev_fail:
3330 free_irq(udc->udp_irq[IRQ_USB_HP], udc);
3331irq_hp_fail:
3332 free_irq(udc->udp_irq[IRQ_USB_LP], udc);
3333irq_lp_fail:
3334 device_unregister(&udc->gadget.dev);
3335dev_register_fail:
3336 dma_pool_destroy(udc->dd_cache);
3337dma_alloc_fail:
3338 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3339 udc->udca_v_base, udc->udca_p_base);
3340i2c_fail:
3341	clk_disable(udc->usb_otg_clk);
3342usb_otg_clk_enable_fail:
3343	clk_disable(udc->usb_slv_clk);
3344usb_clk_enable_fail:
3345pll_set_fail:
3346 clk_disable(udc->usb_pll_clk);
3347pll_enable_fail:
3348 clk_put(udc->usb_slv_clk);
3349usb_otg_clk_get_fail:
3350	clk_put(udc->usb_otg_clk);
3351usb_clk_get_fail:
3352 clk_put(udc->usb_pll_clk);
3353pll_get_fail:
3354 iounmap(udc->udp_baseaddr);
3355io_map_fail:
3356 release_mem_region(udc->io_p_start, udc->io_p_size);
3357 dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
3358
3359 return retval;
3360}
3361
3362static int __devexit lpc32xx_udc_remove(struct platform_device *pdev)
3363{
3364 struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3365
3366 usb_del_gadget_udc(&udc->gadget);
3367 if (udc->driver)
3368 return -EBUSY;
3369
3370 udc_clk_set(udc, 1);
3371 udc_disable(udc);
3372 pullup(udc, 0);
3373
3374 free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
3375
3376 device_init_wakeup(&pdev->dev, 0);
3377 remove_debug_file(udc);
3378
3379 dma_pool_destroy(udc->dd_cache);
3380 dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3381 udc->udca_v_base, udc->udca_p_base);
3382 free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
3383 free_irq(udc->udp_irq[IRQ_USB_HP], udc);
3384 free_irq(udc->udp_irq[IRQ_USB_LP], udc);
3385
3386 device_unregister(&udc->gadget.dev);
3387
3388	clk_disable(udc->usb_otg_clk);
3389	clk_put(udc->usb_otg_clk);
3390	clk_disable(udc->usb_slv_clk);
3391 clk_put(udc->usb_slv_clk);
3392 clk_disable(udc->usb_pll_clk);
3393 clk_put(udc->usb_pll_clk);
3394 iounmap(udc->udp_baseaddr);
3395 release_mem_region(udc->io_p_start, udc->io_p_size);
3396
3397 return 0;
3398}
3399
3400#ifdef CONFIG_PM
3401static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
3402{
3403	struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3404
3405 if (udc->clocked) {
3406 /* Power down ISP */
3407 udc->poweron = 0;
3408 isp1301_set_powerstate(udc, 0);
3409
3410 /* Disable clocking */
3411 udc_clk_set(udc, 0);
3412
3413 /* Keep clock flag on, so we know to re-enable clocks
3414 on resume */
3415 udc->clocked = 1;
3416
3417		/* Kill global USB clock */
3418 clk_disable(udc->usb_slv_clk);
3419 }
3420
3421 return 0;
3422}
3423
3424static int lpc32xx_udc_resume(struct platform_device *pdev)
3425{
3426 struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3427
3428 if (udc->clocked) {
3429 /* Enable global USB clock */
3430 clk_enable(udc->usb_slv_clk);
3431
3432 /* Enable clocking */
3433 udc_clk_set(udc, 1);
3434
3435 /* ISP back to normal power mode */
3436 udc->poweron = 1;
3437 isp1301_set_powerstate(udc, 1);
3438 }
3439
3440 return 0;
3441}
3442#else
3443#define lpc32xx_udc_suspend NULL
3444#define lpc32xx_udc_resume NULL
3445#endif
3446
3447#ifdef CONFIG_OF
3448static struct of_device_id lpc32xx_udc_of_match[] = {
3449 { .compatible = "nxp,lpc3220-udc", },
3450 { },
3451};
3452MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
3453#endif
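
/*
 * Illustrative device tree fragment (a sketch only; the base address and
 * interrupt numbers are placeholders, not taken from a real board file).
 * The driver expects one memory region, the four interrupts described in
 * the probe routine, and a "transceiver" phandle to the ISP1301 I2C device:
 *
 *	usbd: usbd@<base> {
 *		compatible = "nxp,lpc3220-udc";
 *		reg = <BASE SIZE>;
 *		interrupts = <LP 0>, <HP 0>, <DEVDMA 0>, <ATX 0>;
 *		transceiver = <&isp1301>;
 *	};
 */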
3454
3455static struct platform_driver lpc32xx_udc_driver = {
3456 .remove = __devexit_p(lpc32xx_udc_remove),
3457 .shutdown = lpc32xx_udc_shutdown,
3458 .suspend = lpc32xx_udc_suspend,
3459 .resume = lpc32xx_udc_resume,
3460 .driver = {
3461 .name = (char *) driver_name,
3462 .owner = THIS_MODULE,
3463 .of_match_table = of_match_ptr(lpc32xx_udc_of_match),
3464 },
3465};
3466
3467static int __init udc_init_module(void)
3468{
3469 return platform_driver_probe(&lpc32xx_udc_driver, lpc32xx_udc_probe);
3470}
3471module_init(udc_init_module);
3472
3473static void __exit udc_exit_module(void)
3474{
3475 platform_driver_unregister(&lpc32xx_udc_driver);
3476}
3477module_exit(udc_exit_module);
3478
3479MODULE_DESCRIPTION("LPC32XX udc driver");
3480MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
3481MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
3482MODULE_LICENSE("GPL");
3483MODULE_ALIAS("platform:lpc32xx_udc");