/**
 * linux/drivers/usb/gadget/s3c-hsotg.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *      Ben Dooks <ben@simtec.co.uk>
 *      http://armlinux.simtec.co.uk/
 *
 * S3C USB2.0 High-speed / OTG driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of_platform.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/phy.h>
#include <linux/platform_data/s3c-hsotg.h>

#include "s3c-hsotg.h"

static const char * const s3c_hsotg_supply_names[] = {
	"vusb_d",		/* digital USB supply, 1.2V */
	"vusb_a",		/* analog USB supply, 1.1V */
};

/*
 * EP0_MPS_LIMIT
 *
 * Unfortunately there seems to be a limit on the amount of data that can
 * be transferred by IN transactions on EP0. This is either 127 bytes or 3
 * packets (which practically means 1 packet and 63 bytes of data) when the
 * MPS is set to 64.
 *
 * This means if we are wanting to move >127 bytes of data, we need to
 * split the transactions up, but just doing one packet at a time does
 * not work (this may be an implicit DATA0 PID on first packet of the
 * transaction) and doing 2 packets is outside the controller's limits.
 *
 * If we try to lower the MPS size for EP0, then no transfers work properly
 * for EP0, and the system will fail basic enumeration. As no cause for this
 * has currently been found, we cannot support any large IN transfers for
 * EP0.
 */
#define EP0_MPS_LIMIT	64

struct s3c_hsotg;
struct s3c_hsotg_req;

/**
 * struct s3c_hsotg_ep - driver endpoint definition.
 * @ep: The gadget layer representation of the endpoint.
 * @name: The driver generated name for the endpoint.
 * @queue: Queue of requests for this endpoint.
 * @parent: Reference back to the parent device structure.
 * @req: The current request that the endpoint is processing. This is
 *       used to indicate a request has been loaded onto the endpoint
 *       and has yet to be completed (maybe due to data move, or simply
 *       awaiting an ack from the core that all the data has been completed).
 * @debugfs: File entry for debugfs file for this endpoint.
 * @lock: State lock to protect contents of endpoint.
 * @dir_in: Set to true if this endpoint is of the IN direction, which
 *	    means that it is sending data to the Host.
 * @index: The index for the endpoint registers.
 * @mc: Multi Count - number of transactions per microframe
 * @interval: Interval for periodic endpoints
 * @name: The name array passed to the USB core.
 * @halted: Set if the endpoint has been halted.
 * @periodic: Set if this is a periodic ep, such as Interrupt
 * @isochronous: Set if this is an isochronous ep
 * @sent_zlp: Set if we've sent a zero-length packet.
 * @total_data: The total number of data bytes done.
 * @fifo_size: The size of the FIFO (for periodic IN endpoints)
 * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
 * @last_load: The offset of data for the last start of request.
 * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
 *
 * This is the driver's state for each registered endpoint, allowing it
 * to keep track of transactions that need doing. Each endpoint has a
 * lock to protect the state, to try and avoid using an overall lock
 * for the host controller as much as possible.
 *
 * For periodic IN endpoints, we have fifo_size and fifo_load to try
 * and keep track of the amount of data in the periodic FIFO for each
 * of these as we don't have a status register that tells us how much
 * is in each of them. (note, this may actually be useless information
 * as in shared-fifo mode periodic in acts like a single-frame packet
 * buffer rather than a fifo)
 */
struct s3c_hsotg_ep {
	struct usb_ep		ep;
	struct list_head	queue;
	struct s3c_hsotg	*parent;
	struct s3c_hsotg_req	*req;
	struct dentry		*debugfs;


	unsigned long		total_data;
	unsigned int		size_loaded;
	unsigned int		last_load;
	unsigned int		fifo_load;
	unsigned short		fifo_size;

	unsigned char		dir_in;
	unsigned char		index;
	unsigned char		mc;
	unsigned char		interval;

	unsigned int		halted:1;
	unsigned int		periodic:1;
	unsigned int		isochronous:1;
	unsigned int		sent_zlp:1;

	char			name[10];
};

/**
 * struct s3c_hsotg - driver state.
 * @dev: The parent device supplied to the probe function
 * @driver: USB gadget driver
 * @phy: The otg phy transceiver structure for phy control.
 * @plat: The platform specific configuration data. This can be removed once
 * all SoCs support usb transceiver.
 * @regs: The memory area mapped for accessing registers.
 * @irq: The IRQ number we are using
 * @supplies: Definition of USB power supplies
 * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
 * @num_of_eps: Number of available EPs (excluding EP0)
 * @debug_root: root directory for debugfs.
 * @debug_file: main status file for debugfs.
 * @debug_fifo: FIFO status file for debugfs.
 * @ep0_reply: Request used for ep0 reply.
 * @ep0_buff: Buffer for EP0 reply data, if needed.
 * @ctrl_buff: Buffer for EP0 control requests.
 * @ctrl_req: Request for EP0 control packets.
 * @setup: NAK management for EP0 SETUP
 * @last_rst: Time of last reset
 * @eps: The endpoints being supplied to the gadget framework
 */
struct s3c_hsotg {
	struct device		 *dev;
	struct usb_gadget_driver *driver;
	struct usb_phy		*phy;
	struct s3c_hsotg_plat	 *plat;

	spinlock_t		lock;

	void __iomem		*regs;
	int			irq;
	struct clk		*clk;

	struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)];

	unsigned int		dedicated_fifos:1;
	unsigned char		num_of_eps;

	struct dentry		*debug_root;
	struct dentry		*debug_file;
	struct dentry		*debug_fifo;

	struct usb_request	*ep0_reply;
	struct usb_request	*ctrl_req;
	u8			ep0_buff[8];
	u8			ctrl_buff[8];

	struct usb_gadget	gadget;
	unsigned int		setup;
	unsigned long		last_rst;
	struct s3c_hsotg_ep	*eps;
};

/**
 * struct s3c_hsotg_req - data transfer request
 * @req: The USB gadget request
 * @queue: The list of requests for the endpoint this is queued for.
 * @in_progress: Has already had size/packets written to core
 * @mapped: DMA buffer for this request has been mapped via dma_map_single().
 */
struct s3c_hsotg_req {
	struct usb_request	req;
	struct list_head	queue;
	unsigned char		in_progress;
	unsigned char		mapped;
};

/* conversion functions */
static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
{
	return container_of(req, struct s3c_hsotg_req, req);
}

static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct s3c_hsotg_ep, ep);
}

static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
{
	return container_of(gadget, struct s3c_hsotg, gadget);
}

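/* read-modify-write helpers to set or clear bits in a register */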
static inline void __orr32(void __iomem *ptr, u32 val)
{
	writel(readl(ptr) | val, ptr);
}

static inline void __bic32(void __iomem *ptr, u32 val)
{
	writel(readl(ptr) & ~val, ptr);
}

/* forward declaration of functions */
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);

/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
 *
 * Currently, we have the DMA support code worked into everywhere
 * that needs it, but the AMBA DMA implementation in the hardware can
 * only DMA from 32bit aligned addresses. This means that gadgets such
 * as the CDC Ethernet cannot work as they often pass packets which are
 * not 32bit aligned.
 *
 * Unfortunately the choice to use DMA or not is global to the controller
 * and seems to be only settable when the controller is being put through
 * a core reset. This means we either need to fix the gadgets to take
 * account of DMA alignment, or add bounce buffers (yuerk).
 *
 * Until this issue is sorted out, we always return 'false'.
 */
static inline bool using_dma(struct s3c_hsotg *hsotg)
{
	return false;	/* support is not complete */
}

/**
 * s3c_hsotg_en_gsint - enable one or more of the general interrupt
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to enable
 */
static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = readl(hsotg->regs + GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk | ints;

	if (new_gsintmsk != gsintmsk) {
		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
		writel(new_gsintmsk, hsotg->regs + GINTMSK);
	}
}

/**
 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
 * @hsotg: The device state
 * @ints: A bitmask of the interrupts to disable
 */
static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
{
	u32 gsintmsk = readl(hsotg->regs + GINTMSK);
	u32 new_gsintmsk;

	new_gsintmsk = gsintmsk & ~ints;

	if (new_gsintmsk != gsintmsk)
		writel(new_gsintmsk, hsotg->regs + GINTMSK);
}

/**
 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
 * @hsotg: The device state
 * @ep: The endpoint index
 * @dir_in: True if direction is in.
 * @en: The enable value, true to enable
 *
 * Set or clear the mask for an individual endpoint's interrupt
 * request.
 */
static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
				 unsigned int ep, unsigned int dir_in,
				 unsigned int en)
{
	unsigned long flags;
	u32 bit = 1 << ep;
	u32 daint;

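	/* IN endpoints use the low 16 bits of DAINTMSK, OUT endpoints the upper 16 */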
	if (!dir_in)
		bit <<= 16;

	local_irq_save(flags);
	daint = readl(hsotg->regs + DAINTMSK);
	if (en)
		daint |= bit;
	else
		daint &= ~bit;
	writel(daint, hsotg->regs + DAINTMSK);
	local_irq_restore(flags);
}

/**
 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
 * @hsotg: The device instance.
 */
static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
{
	unsigned int ep;
	unsigned int addr;
	unsigned int size;
	int timeout;
	u32 val;

	/* set FIFO sizes to 2048/1024 */

	writel(2048, hsotg->regs + GRXFSIZ);
	writel(GNPTXFSIZ_NPTxFStAddr(2048) |
	       GNPTXFSIZ_NPTxFDep(1024),
	       hsotg->regs + GNPTXFSIZ);

	/*
	 * arrange all the rest of the TX FIFOs, as some versions of this
	 * block have overlapping default addresses. This also ensures
	 * that if the settings have been changed, then they are set to
	 * known values.
	 */

	/* start at the end of the GNPTXFSIZ, rounded up */
	addr = 2048 + 1024;
	size = 768;

	/*
	 * currently we allocate TX FIFOs for all possible endpoints,
	 * and assume that they are all the same size.
	 */

	for (ep = 1; ep <= 15; ep++) {
		val = addr;
		val |= size << DPTXFSIZn_DPTxFSize_SHIFT;
		addr += size;

		writel(val, hsotg->regs + DPTXFSIZn(ep));
	}

	/*
	 * according to p428 of the design guide, we need to ensure that
	 * all fifos are flushed before continuing
	 */

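	/* TxFNum(0x10) requests a flush of all TX FIFOs at once */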
	writel(GRSTCTL_TxFNum(0x10) | GRSTCTL_TxFFlsh |
	       GRSTCTL_RxFFlsh, hsotg->regs + GRSTCTL);

	/* wait until the fifos are both flushed */
	timeout = 100;
	while (1) {
		val = readl(hsotg->regs + GRSTCTL);

		if ((val & (GRSTCTL_TxFFlsh | GRSTCTL_RxFFlsh)) == 0)
			break;

		if (--timeout == 0) {
			dev_err(hsotg->dev,
				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
				__func__, val);
		}

		udelay(1);
	}

	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
}

/**
 * s3c_hsotg_ep_alloc_request - allocate a USB request structure
 * @ep: USB endpoint to allocate request for.
 * @flags: Allocation flags
 *
 * Allocate a new USB request structure appropriate for the specified endpoint
 */
static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
						      gfp_t flags)
{
	struct s3c_hsotg_req *req;

	req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/**
 * is_ep_periodic - return true if the endpoint is in periodic mode.
 * @hs_ep: The endpoint to query.
 *
 * Returns true if the endpoint is in periodic mode, meaning it is being
 * used for an Interrupt or ISO transfer.
 */
static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
{
	return hs_ep->periodic;
}

/**
 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint for the request
 * @hs_req: The request being processed.
 *
 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
 * of a request to ensure the buffer is ready for access by the caller.
 */
static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req)
{
	struct usb_request *req = &hs_req->req;

	/* ignore this if we're not moving any data */
	if (hs_req->req.length == 0)
		return;

	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
}

/**
 * s3c_hsotg_write_fifo - write packet data to the TxFIFO
 * @hsotg: The controller state.
 * @hs_ep: The endpoint we're going to write for.
 * @hs_req: The request to write data for.
 *
 * This is called when the TxFIFO has some space in it to hold a new
 * transmission and we have something to give it. The actual setup of
 * the data size is done elsewhere, so all we have to do is to actually
 * write the data.
 *
 * The return value is zero if there is more space (or nothing was done)
 * otherwise -ENOSPC is returned if the FIFO space was used up.
 *
 * This routine is only needed for PIO.
 */
static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req)
{
	bool periodic = is_ep_periodic(hs_ep);
	u32 gnptxsts = readl(hsotg->regs + GNPTXSTS);
	int buf_pos = hs_req->req.actual;
	int to_write = hs_ep->size_loaded;
	void *data;
	int can_write;
	int pkt_round;
	int max_transfer;

	to_write -= (buf_pos - hs_ep->last_load);

	/* if there's nothing to write, get out early */
	if (to_write == 0)
		return 0;

	if (periodic && !hsotg->dedicated_fifos) {
		u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
		int size_left;
		int size_done;

		/*
		 * work out how much data was loaded so we can calculate
		 * how much data is left in the fifo.
		 */

		size_left = DxEPTSIZ_XferSize_GET(epsize);

		/*
		 * if shared fifo, we cannot write anything until the
		 * previous data has been completely sent.
		 */
		if (hs_ep->fifo_load != 0) {
			s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
			return -ENOSPC;
		}

		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
			__func__, size_left,
			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

		/* how much of the data has moved */
		size_done = hs_ep->size_loaded - size_left;

		/* how much data is left in the fifo */
		can_write = hs_ep->fifo_load - size_done;
		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
			__func__, can_write);

		can_write = hs_ep->fifo_size - can_write;
		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
			__func__, can_write);

		if (can_write <= 0) {
			s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
			return -ENOSPC;
		}
	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
		can_write = readl(hsotg->regs + DTXFSTS(hs_ep->index));

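		/* DTXFSTS reports free space in its low 16 bits, in 32-bit words */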
		can_write &= 0xffff;
		can_write *= 4;
	} else {
		if (GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
			dev_dbg(hsotg->dev,
				"%s: no queue slots available (0x%08x)\n",
				__func__, gnptxsts);

			s3c_hsotg_en_gsint(hsotg, GINTSTS_NPTxFEmp);
			return -ENOSPC;
		}

		can_write = GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
		can_write *= 4;	/* fifo size is in 32bit quantities. */
	}

	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;

	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
		 __func__, gnptxsts, can_write, to_write, max_transfer);

	/*
	 * limit to 512 bytes of data, it seems at least on the non-periodic
	 * FIFO, requests of >512 cause the endpoint to get stuck with a
	 * fragment of the end of the transfer in it.
	 */
	if (can_write > 512 && !periodic)
		can_write = 512;

	/*
	 * limit the write to one max-packet size worth of data, but allow
	 * the transfer to return that it did not run out of fifo space
	 * doing it.
	 */
	if (to_write > max_transfer) {
		to_write = max_transfer;

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			s3c_hsotg_en_gsint(hsotg,
					   periodic ? GINTSTS_PTxFEmp :
					   GINTSTS_NPTxFEmp);
	}

	/* see if we can write data */

	if (to_write > can_write) {
		to_write = can_write;
		pkt_round = to_write % max_transfer;

		/*
		 * Round the write down to an
		 * exact number of packets.
		 *
		 * Note, we do not currently check to see if we can ever
		 * write a full packet or not to the FIFO.
		 */

		if (pkt_round)
			to_write -= pkt_round;

		/*
		 * enable correct FIFO interrupt to alert us when there
		 * is more room left.
		 */

		/* it's needed only when we do not use dedicated fifos */
		if (!hsotg->dedicated_fifos)
			s3c_hsotg_en_gsint(hsotg,
					   periodic ? GINTSTS_PTxFEmp :
					   GINTSTS_NPTxFEmp);
	}

	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
		 to_write, hs_req->req.length, can_write, buf_pos);

	if (to_write <= 0)
		return -ENOSPC;

	hs_req->req.actual = buf_pos + to_write;
	hs_ep->total_data += to_write;

	if (periodic)
		hs_ep->fifo_load += to_write;

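	/* the FIFO is written in 32-bit words, so round the byte count up */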
	to_write = DIV_ROUND_UP(to_write, 4);
	data = hs_req->req.buf + buf_pos;

	writesl(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);

	return (to_write >= can_write) ? -ENOSPC : 0;
}

/**
 * get_ep_limit - get the maximum data length for this endpoint
 * @hs_ep: The endpoint
 *
 * Return the maximum data that can be queued in one go on a given endpoint
 * so that transfers that are too long can be split.
 */
static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
{
	int index = hs_ep->index;
	unsigned maxsize;
	unsigned maxpkt;

	if (index != 0) {
		maxsize = DxEPTSIZ_XferSize_LIMIT + 1;
		maxpkt = DxEPTSIZ_PktCnt_LIMIT + 1;
	} else {
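		/* EP0 is far more limited; see the EP0_MPS_LIMIT comment above */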
		maxsize = 64+64;
		if (hs_ep->dir_in)
			maxpkt = DIEPTSIZ0_PktCnt_LIMIT + 1;
		else
			maxpkt = 2;
	}

	/* we made the constant loading easier above by using +1 */
	maxpkt--;
	maxsize--;

	/*
	 * constrain by packet count if maxpkts*pktsize is greater
	 * than the length register size.
	 */

	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
		maxsize = maxpkt * hs_ep->ep.maxpacket;

	return maxsize;
}

/**
 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
 * @hsotg: The controller state.
 * @hs_ep: The endpoint to process a request for
 * @hs_req: The request to start.
 * @continuing: True if we are doing more for the current request.
 *
 * Start the given request running by setting the endpoint registers
 * appropriately, and writing any data to the FIFOs.
 */
static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req,
				bool continuing)
{
	struct usb_request *ureq = &hs_req->req;
	int index = hs_ep->index;
	int dir_in = hs_ep->dir_in;
	u32 epctrl_reg;
	u32 epsize_reg;
	u32 epsize;
	u32 ctrl;
	unsigned length;
	unsigned packets;
	unsigned maxreq;

	if (index != 0) {
		if (hs_ep->req && !continuing) {
			dev_err(hsotg->dev, "%s: active request\n", __func__);
			WARN_ON(1);
			return;
		} else if (hs_ep->req != hs_req && continuing) {
			dev_err(hsotg->dev,
				"%s: continue different req\n", __func__);
			WARN_ON(1);
			return;
		}
	}

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
		__func__, readl(hsotg->regs + epctrl_reg), index,
		hs_ep->dir_in ? "in" : "out");

	/* If endpoint is stalled, we will restart request later */
	ctrl = readl(hsotg->regs + epctrl_reg);

	if (ctrl & DxEPCTL_Stall) {
		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
		return;
	}

	length = ureq->length - ureq->actual;
	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
		ureq->length, ureq->actual);
	if (0)
		dev_dbg(hsotg->dev,
			"REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
			ureq->buf, length, ureq->dma,
			ureq->no_interrupt, ureq->zero, ureq->short_not_ok);

	maxreq = get_ep_limit(hs_ep);
	if (length > maxreq) {
		int round = maxreq % hs_ep->ep.maxpacket;

		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
			__func__, length, maxreq, round);

		/* round down to multiple of packets */
		if (round)
			maxreq -= round;

		length = maxreq;
	}

	if (length)
		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
	else
		packets = 1;	/* send one packet if length is zero. */

	if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
		dev_err(hsotg->dev, "req length > maxpacket*mc\n");
		return;
	}

	if (dir_in && index != 0)
		if (hs_ep->isochronous)
			epsize = DxEPTSIZ_MC(packets);
		else
			epsize = DxEPTSIZ_MC(1);
	else
		epsize = 0;

	if (index != 0 && ureq->zero) {
		/*
		 * test for the packets being exactly right for the
		 * transfer
		 */

		if (length == (packets * hs_ep->ep.maxpacket))
			packets++;
	}

	epsize |= DxEPTSIZ_PktCnt(packets);
	epsize |= DxEPTSIZ_XferSize(length);

	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
		__func__, packets, length, ureq->length, epsize, epsize_reg);

	/* store the request as the current one we're doing */
	hs_ep->req = hs_req;

	/* write size / packets */
	writel(epsize, hsotg->regs + epsize_reg);

	if (using_dma(hsotg) && !continuing) {
		unsigned int dma_reg;

		/*
		 * write DMA address to control register, buffer already
		 * synced by s3c_hsotg_ep_queue().
		 */

		dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
		writel(ureq->dma, hsotg->regs + dma_reg);

		dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
			__func__, ureq->dma, dma_reg);
	}

	ctrl |= DxEPCTL_EPEna;	/* ensure ep enabled */
	ctrl |= DxEPCTL_USBActEp;

	dev_dbg(hsotg->dev, "setup req:%d\n", hsotg->setup);

	/* For Setup request do not clear NAK */
	if (hsotg->setup && index == 0)
		hsotg->setup = 0;
	else
		ctrl |= DxEPCTL_CNAK;	/* clear NAK set by core */


	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/*
	 * set these, it seems that DMA support increments past the end
	 * of the packet buffer so we need to calculate the length from
	 * this information.
	 */
	hs_ep->size_loaded = length;
	hs_ep->last_load = ureq->actual;

	if (dir_in && !using_dma(hsotg)) {
		/* set these anyway, we may need them for non-periodic in */
		hs_ep->fifo_load = 0;

		s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	/*
	 * clear the INTknTXFEmpMsk when we start a request, more as an aid
	 * to debugging to see what is going on.
	 */
	if (dir_in)
		writel(DIEPMSK_INTknTXFEmpMsk,
		       hsotg->regs + DIEPINT(index));

	/*
	 * Note, trying to clear the NAK here causes problems with transmit
	 * on the S3C6400 ending up with the TXFIFO becoming full.
	 */

	/* check ep is enabled */
	if (!(readl(hsotg->regs + epctrl_reg) & DxEPCTL_EPEna))
		dev_warn(hsotg->dev,
			 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
			 index, readl(hsotg->regs + epctrl_reg));

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));

	/* enable ep interrupts */
	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
}

/**
 * s3c_hsotg_map_dma - map the DMA memory being used for the request
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request is on.
 * @req: The request being processed.
 *
 * We've been asked to queue a request, so ensure that the memory buffer
 * is correctly setup for DMA. If we've been passed an extant DMA address
 * then ensure the buffer has been synced to memory. If our buffer has no
 * DMA memory, then we map the memory and mark our request to allow us to
 * cleanup on completion.
 */
static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
			     struct s3c_hsotg_ep *hs_ep,
			     struct usb_request *req)
{
	struct s3c_hsotg_req *hs_req = our_req(req);
	int ret;

	/* if the length is zero, ignore the DMA data */
	if (hs_req->req.length == 0)
		return 0;

	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
	if (ret)
		goto dma_error;

	return 0;

dma_error:
	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
		__func__, req->buf, req->length);

	return -EIO;
}

static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
			      gfp_t gfp_flags)
{
	struct s3c_hsotg_req *hs_req = our_req(req);
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	bool first;

	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
		ep->name, req, req->length, req->buf, req->no_interrupt,
		req->zero, req->short_not_ok);

	/* initialise status of the request */
	INIT_LIST_HEAD(&hs_req->queue);
	req->actual = 0;
	req->status = -EINPROGRESS;

	/* if we're using DMA, sync the buffers as necessary */
	if (using_dma(hs)) {
		int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
		if (ret)
			return ret;
	}

	first = list_empty(&hs_ep->queue);
	list_add_tail(&hs_req->queue, &hs_ep->queue);

	if (first)
		s3c_hsotg_start_req(hs, hs_ep, hs_req, false);

	return 0;
}

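/*
 * Wrapper around s3c_hsotg_ep_queue() that takes the device spinlock, for
 * callers that do not already hold it; the unlocked version above is used
 * internally with the lock held.
 */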
static int s3c_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
			      gfp_t gfp_flags)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	unsigned long flags = 0;
	int ret = 0;

	spin_lock_irqsave(&hs->lock, flags);
	ret = s3c_hsotg_ep_queue(ep, req, gfp_flags);
	spin_unlock_irqrestore(&hs->lock, flags);

	return ret;
}

static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
				      struct usb_request *req)
{
	struct s3c_hsotg_req *hs_req = our_req(req);

	kfree(hs_req);
}

/**
 * s3c_hsotg_complete_oursetup - setup completion callback
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself
 * submitted that need cleaning up.
 */
static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
					struct usb_request *req)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;

	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);

	s3c_hsotg_ep_free_request(ep, req);
}

/**
 * ep_from_windex - convert control wIndex value to endpoint
 * @hsotg: The driver state.
 * @windex: The control request wIndex field (in host order).
 *
 * Convert the given wIndex into a pointer to a driver endpoint
 * structure, or return NULL if it is not a valid endpoint.
 */
static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
					   u32 windex)
{
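	/* wIndex: direction flag in USB_DIR_IN, endpoint number in the low bits */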
	struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
	int dir = (windex & USB_DIR_IN) ? 1 : 0;
	int idx = windex & 0x7F;

	if (windex >= 0x100)
		return NULL;

	if (idx > hsotg->num_of_eps)
		return NULL;

	if (idx && ep->dir_in != dir)
		return NULL;

	return ep;
}

/**
 * s3c_hsotg_send_reply - send reply to control request
 * @hsotg: The device state
 * @ep: Endpoint 0
 * @buff: Buffer for request
 * @length: Length of reply.
 *
 * Create a request and queue it on the given endpoint. This is useful as
 * an internal method of sending replies to certain control requests, etc.
 */
static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *ep,
				void *buff,
				int length)
{
	struct usb_request *req;
	int ret;

	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);

	req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
	hsotg->ep0_reply = req;
	if (!req) {
		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
		return -ENOMEM;
	}

	req->buf = hsotg->ep0_buff;
	req->length = length;
	req->zero = 1; /* always do zero-length final transfer */
	req->complete = s3c_hsotg_complete_oursetup;

	if (length)
		memcpy(req->buf, buff, length);
	else
		ep->sent_zlp = 1;

	ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
	if (ret) {
		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * s3c_hsotg_process_req_status - process request GET_STATUS
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
					struct usb_ctrlrequest *ctrl)
{
	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
	struct s3c_hsotg_ep *ep;
	__le16 reply;
	int ret;

	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);

	if (!ep0->dir_in) {
		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		reply = cpu_to_le16(0); /* bit 0 => self powered,
					 * bit 1 => remote wakeup */
		break;

	case USB_RECIP_INTERFACE:
		/* currently, the data result should be zero */
		reply = cpu_to_le16(0);
		break;

	case USB_RECIP_ENDPOINT:
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep)
			return -ENOENT;

		reply = cpu_to_le16(ep->halted ? 1 : 0);
		break;

	default:
		return 0;
	}

	if (le16_to_cpu(ctrl->wLength) != 2)
		return -EINVAL;

	ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
	if (ret) {
		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
		return ret;
	}

	return 1;
}

static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);

/**
 * get_ep_head - return the first request on the endpoint
 * @hs_ep: The controller endpoint to get
 *
 * Get the first request on the endpoint.
 */
static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
{
	if (list_empty(&hs_ep->queue))
		return NULL;

	return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
}

/**
 * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 */
static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
					 struct usb_ctrlrequest *ctrl)
{
	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
	struct s3c_hsotg_req *hs_req;
	bool restart;
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct s3c_hsotg_ep *ep;
	int ret;
	bool halted;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, le16_to_cpu(ctrl->wIndex));
			return -ENOENT;
		}

		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_ENDPOINT_HALT:
			halted = ep->halted;

			s3c_hsotg_ep_sethalt(&ep->ep, set);

			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
			if (ret) {
				dev_err(hsotg->dev,
					"%s: failed to send reply\n", __func__);
				return ret;
			}

			/*
			 * we have to complete all requests for ep if it was
			 * halted, and the halt was cleared by CLEAR_FEATURE
			 */

			if (!set && halted) {
				/*
				 * If we have request in progress,
				 * then complete it
				 */
				if (ep->req) {
					hs_req = ep->req;
					ep->req = NULL;
					list_del_init(&hs_req->queue);
					hs_req->req.complete(&ep->ep,
							     &hs_req->req);
				}

				/* If we have pending request, then start it */
				restart = !list_empty(&ep->queue);
				if (restart) {
					hs_req = get_ep_head(ep);
					s3c_hsotg_start_req(hsotg, ep,
							    hs_req, false);
				}
			}

			break;

		default:
			return -ENOENT;
		}
	} else
		return -ENOENT;  /* currently only deal with endpoint */

	return 1;
}

static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg);

/**
 * s3c_hsotg_process_control - process a control request
 * @hsotg: The device state
 * @ctrl: The control request received
 *
 * The controller has received the SETUP phase of a control request, and
 * needs to work out what to do next (and whether to pass it on to the
 * gadget driver).
 */
static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
				      struct usb_ctrlrequest *ctrl)
{
	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
	int ret = 0;
	u32 dcfg;

	ep0->sent_zlp = 0;

	dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
		 ctrl->bRequest, ctrl->bRequestType,
		 ctrl->wValue, ctrl->wLength);

	/*
	 * record the direction of the request, for later use when enqueuing
	 * packets onto EP0.
	 */

	ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
	dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);

	/*
	 * if we've no data with this request, then the last part of the
	 * transaction is going to implicitly be IN.
	 */
	if (ctrl->wLength == 0)
		ep0->dir_in = 1;

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			s3c_hsotg_disconnect(hsotg);
			dcfg = readl(hsotg->regs + DCFG);
			dcfg &= ~DCFG_DevAddr_MASK;
			dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT;
			writel(dcfg, hsotg->regs + DCFG);

			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);

			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
			return;

		case USB_REQ_GET_STATUS:
			ret = s3c_hsotg_process_req_status(hsotg, ctrl);
			break;

		case USB_REQ_CLEAR_FEATURE:
		case USB_REQ_SET_FEATURE:
			ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
			break;
		}
	}

	/* as a fallback, try delivering it to the driver to deal with */

	if (ret == 0 && hsotg->driver) {
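		/*
		 * drop the lock over the gadget's setup callback, which may
		 * call back into the driver (for example to queue a reply)
		 */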
		spin_unlock(&hsotg->lock);
		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
		spin_lock(&hsotg->lock);
		if (ret < 0)
			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
	}

	/*
	 * the request is either one we cannot handle, or is not formatted
	 * correctly, so respond with a STALL for the status stage to indicate
	 * failure.
	 */

	if (ret < 0) {
		u32 reg;
		u32 ctrl;

		dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
		reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;

		/*
		 * DxEPCTL_Stall will be cleared by EP once it has
		 * taken effect, so no need to clear later.
		 */

		ctrl = readl(hsotg->regs + reg);
		ctrl |= DxEPCTL_Stall;
		ctrl |= DxEPCTL_CNAK;
		writel(ctrl, hsotg->regs + reg);

		dev_dbg(hsotg->dev,
			"written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
			ctrl, reg, readl(hsotg->regs + reg));

		/*
		 * don't believe we need to do anything more to get the EP
		 * to reply with a STALL packet
		 */

		/*
		 * complete won't be called, so we enqueue
		 * setup request here
		 */
		s3c_hsotg_enqueue_setup(hsotg);
	}
}

/**
 * s3c_hsotg_complete_setup - completion of a setup transfer
 * @ep: The endpoint the request was on.
 * @req: The request completed.
 *
 * Called on completion of any requests the driver itself submitted for
 * EP0 setup packets
 */
static void s3c_hsotg_complete_setup(struct usb_ep *ep,
				     struct usb_request *req)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;

	if (req->status < 0) {
		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
		return;
	}

	spin_lock(&hsotg->lock);
	if (req->actual == 0)
		s3c_hsotg_enqueue_setup(hsotg);
	else
		s3c_hsotg_process_control(hsotg, req->buf);
	spin_unlock(&hsotg->lock);
}

/**
 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
 * @hsotg: The device state.
 *
 * Enqueue a request on EP0 if necessary to receive any SETUP packets
 * received from the host.
 */
static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
{
	struct usb_request *req = hsotg->ctrl_req;
	struct s3c_hsotg_req *hs_req = our_req(req);
	int ret;

	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

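	/* a SETUP transaction always delivers exactly 8 bytes of data */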
	req->zero = 0;
	req->length = 8;
	req->buf = hsotg->ctrl_buff;
	req->complete = s3c_hsotg_complete_setup;

	if (!list_empty(&hs_req->queue)) {
		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
		return;
	}

	hsotg->eps[0].dir_in = 0;

	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
		/*
		 * Don't think there's much we can do other than watch the
		 * driver fail.
		 */
	}
}

/**
 * s3c_hsotg_complete_request - complete a request given to us
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * The given request has finished, so call the necessary completion
 * if it has one and then look to see if we can start a new request
 * on the endpoint.
 *
 * Note, expects the ep to already be locked as appropriate.
 */
static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       struct s3c_hsotg_req *hs_req,
				       int result)
{
	bool restart;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/*
	 * only replace the status if we've not already set an error
	 * from a previous transaction
	 */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	if (using_dma(hsotg))
		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	/*
	 * call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint.
	 */

	if (hs_req->req.complete) {
		spin_unlock(&hsotg->lock);
		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
		spin_lock(&hsotg->lock);
	}

	/*
	 * Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this.
	 */

	if (!hs_ep->req && result >= 0) {
		restart = !list_empty(&hs_ep->queue);
		if (restart) {
			hs_req = get_ep_head(hs_ep);
			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		}
	}
}

1426/**
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001427 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
1428 * @hsotg: The device state.
1429 * @ep_idx: The endpoint index for the data
1430 * @size: The size of data in the fifo, in bytes
1431 *
1432 * The FIFO status shows there is data to read from the FIFO for a given
1433 * endpoint, so sort out whether we need to read the data into a request
1434 * that has been made for that endpoint.
1435 */
1436static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
1437{
1438 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
1439 struct s3c_hsotg_req *hs_req = hs_ep->req;
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001440 void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001441 int to_read;
1442 int max_req;
1443 int read_ptr;
1444
Lukasz Majewski22258f42012-06-14 10:02:24 +02001445
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001446 if (!hs_req) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001447 u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001448 int ptr;
1449
1450 dev_warn(hsotg->dev,
1451 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
1452 __func__, size, ep_idx, epctl);
1453
1454 /* dump the data from the FIFO, we've nothing we can do */
1455 for (ptr = 0; ptr < size; ptr += 4)
1456 (void)readl(fifo);
1457
1458 return;
1459 }
1460
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001461 to_read = size;
1462 read_ptr = hs_req->req.actual;
1463 max_req = hs_req->req.length - read_ptr;
1464
Ben Dooksa33e7132010-07-19 09:40:49 +01001465 dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
1466 __func__, to_read, max_req, read_ptr, hs_req->req.length);
1467
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001468 if (to_read > max_req) {
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001469 /*
1470 * more data appeared than we where willing
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001471 * to deal with in this request.
1472 */
1473
1474 /* currently we don't deal this */
1475 WARN_ON_ONCE(1);
1476 }
1477
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001478 hs_ep->total_data += to_read;
1479 hs_req->req.actual += to_read;
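	/* the FIFO is read in 32-bit words, so round the byte count up */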
1480 to_read = DIV_ROUND_UP(to_read, 4);
1481
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001482 /*
1483 * note, we might over-write the buffer end by 3 bytes depending on
1484 * alignment of the data.
1485 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001486 readsl(fifo, hs_req->req.buf + read_ptr, to_read);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001487}
1488
1489/**
1490 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
1491 * @hsotg: The device instance
1492 * @req: The request currently on this endpoint
1493 *
1494 * Generate a zero-length IN packet request for terminating a SETUP
1495 * transaction.
1496 *
1497 * Note, since we don't write any data to the TxFIFO, then it is
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001498 * currently believed that we do not need to wait for any space in
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001499 * the TxFIFO.
1500 */
1501static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
1502 struct s3c_hsotg_req *req)
1503{
1504 u32 ctrl;
1505
1506 if (!req) {
1507 dev_warn(hsotg->dev, "%s: no request?\n", __func__);
1508 return;
1509 }
1510
1511 if (req->req.length == 0) {
1512 hsotg->eps[0].sent_zlp = 1;
1513 s3c_hsotg_enqueue_setup(hsotg);
1514 return;
1515 }
1516
1517 hsotg->eps[0].dir_in = 1;
1518 hsotg->eps[0].sent_zlp = 1;
1519
1520 dev_dbg(hsotg->dev, "sending zero-length packet\n");
1521
1522 /* issue a zero-sized packet to terminate this */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001523 writel(DxEPTSIZ_MC(1) | DxEPTSIZ_PktCnt(1) |
1524 DxEPTSIZ_XferSize(0), hsotg->regs + DIEPTSIZ(0));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001525
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001526 ctrl = readl(hsotg->regs + DIEPCTL0);
1527 ctrl |= DxEPCTL_CNAK; /* clear NAK set by core */
1528 ctrl |= DxEPCTL_EPEna; /* ensure ep enabled */
1529 ctrl |= DxEPCTL_USBActEp;
1530 writel(ctrl, hsotg->regs + DIEPCTL0);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001531}
1532
1533/**
1534 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
1535 * @hsotg: The device instance
1536 * @epnum: The endpoint received from
1537 * @was_setup: Set if processing a SetupDone event.
1538 *
1539 * The RXFIFO has delivered an OutDone event, which means that the data
1540 * transfer for an OUT endpoint has been completed, either by a short
1541 * packet or by the finish of a transfer.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001542 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001543static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
1544 int epnum, bool was_setup)
1545{
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001546 u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001547 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
1548 struct s3c_hsotg_req *hs_req = hs_ep->req;
1549 struct usb_request *req = &hs_req->req;
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001550 unsigned size_left = DxEPTSIZ_XferSize_GET(epsize);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001551 int result = 0;
1552
1553 if (!hs_req) {
1554 dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
1555 return;
1556 }
1557
1558 if (using_dma(hsotg)) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001559 unsigned size_done;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001560
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001561 /*
1562 * Calculate the size of the transfer by checking how much
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001563 * is left in the endpoint size register and then working it
1564 * out from the amount we loaded for the transfer.
1565 *
1566 * We need to do this as DMA pointers are always 32bit aligned
1567 * so may overshoot/undershoot the transfer.
1568 */
1569
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001570 size_done = hs_ep->size_loaded - size_left;
1571 size_done += hs_ep->last_load;
1572
1573 req->actual = size_done;
1574 }
1575
Ben Dooksa33e7132010-07-19 09:40:49 +01001576	/* if there is more of the request to do, schedule a new transfer */
1577 if (req->actual < req->length && size_left == 0) {
1578 s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1579 return;
Lukasz Majewski71225be2012-05-04 14:17:03 +02001580 } else if (epnum == 0) {
1581 /*
 1582		 * after a SetupDone event (was_setup), clear the setup flag;
 1583		 * otherwise set it so that CNAK is set for non-Setup requests
1584 */
1585 hsotg->setup = was_setup ? 0 : 1;
Ben Dooksa33e7132010-07-19 09:40:49 +01001586 }
1587
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001588 if (req->actual < req->length && req->short_not_ok) {
1589 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
1590 __func__, req->actual, req->length);
1591
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001592 /*
1593 * todo - what should we return here? there's no one else
1594 * even bothering to check the status.
1595 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001596 }
1597
1598 if (epnum == 0) {
Lukasz Majewskid3ca0252012-05-04 14:17:04 +02001599 /*
1600 * Condition req->complete != s3c_hsotg_complete_setup says:
1601 * send ZLP when we have an asynchronous request from gadget
1602 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001603 if (!was_setup && req->complete != s3c_hsotg_complete_setup)
1604 s3c_hsotg_send_zlp(hsotg, hs_req);
1605 }
1606
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02001607 s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001608}
1609
1610/**
1611 * s3c_hsotg_read_frameno - read current frame number
1612 * @hsotg: The device instance
1613 *
1614 * Return the current frame number
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001615 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001616static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
1617{
1618 u32 dsts;
1619
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001620 dsts = readl(hsotg->regs + DSTS);
1621 dsts &= DSTS_SOFFN_MASK;
1622 dsts >>= DSTS_SOFFN_SHIFT;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001623
1624 return dsts;
1625}
1626
1627/**
1628 * s3c_hsotg_handle_rx - RX FIFO has data
1629 * @hsotg: The device instance
1630 *
1631 * The IRQ handler has detected that the RX FIFO has some data in it
1632 * that requires processing, so find out what is in there and do the
1633 * appropriate read.
1634 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001635 * The RXFIFO is a true FIFO, the packets coming out are still in packet
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001636 * chunks, so if you have x packets received on an endpoint you'll get x
1637 * FIFO events delivered, each with a packet's worth of data in it.
1638 *
1639 * When using DMA, we should not be processing events from the RXFIFO
1640 * as the actual data should be sent to the memory directly and we turn
1641 * on the completion interrupts to get notifications of transfer completion.
1642 */
Mark Brown0978f8c2010-01-18 13:18:35 +00001643static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001644{
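	/* GRXSTSP is read-and-pop: this read removes one status entry from the RxFIFO */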
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001645 u32 grxstsr = readl(hsotg->regs + GRXSTSP);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001646 u32 epnum, status, size;
1647
1648 WARN_ON(using_dma(hsotg));
1649
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001650 epnum = grxstsr & GRXSTS_EPNum_MASK;
1651 status = grxstsr & GRXSTS_PktSts_MASK;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001652
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001653 size = grxstsr & GRXSTS_ByteCnt_MASK;
1654 size >>= GRXSTS_ByteCnt_SHIFT;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001655
1656 if (1)
1657 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1658 __func__, grxstsr, size, epnum);
1659
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001660#define __status(x) ((x) >> GRXSTS_PktSts_SHIFT)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001661
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001662 switch (status >> GRXSTS_PktSts_SHIFT) {
1663 case __status(GRXSTS_PktSts_GlobalOutNAK):
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001664 dev_dbg(hsotg->dev, "GlobalOutNAK\n");
1665 break;
1666
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001667 case __status(GRXSTS_PktSts_OutDone):
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001668 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
1669 s3c_hsotg_read_frameno(hsotg));
1670
1671 if (!using_dma(hsotg))
1672 s3c_hsotg_handle_outdone(hsotg, epnum, false);
1673 break;
1674
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001675 case __status(GRXSTS_PktSts_SetupDone):
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001676 dev_dbg(hsotg->dev,
1677 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1678 s3c_hsotg_read_frameno(hsotg),
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001679 readl(hsotg->regs + DOEPCTL(0)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001680
1681 s3c_hsotg_handle_outdone(hsotg, epnum, true);
1682 break;
1683
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001684 case __status(GRXSTS_PktSts_OutRX):
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001685 s3c_hsotg_rx_data(hsotg, epnum, size);
1686 break;
1687
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001688 case __status(GRXSTS_PktSts_SetupRX):
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001689 dev_dbg(hsotg->dev,
1690 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1691 s3c_hsotg_read_frameno(hsotg),
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001692 readl(hsotg->regs + DOEPCTL(0)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001693
1694 s3c_hsotg_rx_data(hsotg, epnum, size);
1695 break;
1696
1697 default:
1698 dev_warn(hsotg->dev, "%s: unknown status %08x\n",
1699 __func__, grxstsr);
1700
1701 s3c_hsotg_dump(hsotg);
1702 break;
1703 }
1704}
1705
1706/**
1707 * s3c_hsotg_ep0_mps - turn max packet size into register setting
1708 * @mps: The maximum packet size in bytes.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001709 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001710static u32 s3c_hsotg_ep0_mps(unsigned int mps)
1711{
1712 switch (mps) {
1713 case 64:
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001714 return D0EPCTL_MPS_64;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001715 case 32:
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001716 return D0EPCTL_MPS_32;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001717 case 16:
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001718 return D0EPCTL_MPS_16;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001719 case 8:
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001720 return D0EPCTL_MPS_8;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001721 }
1722
1723 /* bad max packet size, warn and return invalid result */
1724 WARN_ON(1);
1725 return (u32)-1;
1726}
1727
1728/**
1729 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
1730 * @hsotg: The driver state.
1731 * @ep: The index number of the endpoint
1732 * @mps: The maximum packet size in bytes
1733 *
1734 * Configure the maximum packet size for the given endpoint, updating
1735 * the hardware control registers to reflect this.
1736 */
1737static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
1738 unsigned int ep, unsigned int mps)
1739{
1740 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
1741 void __iomem *regs = hsotg->regs;
1742 u32 mpsval;
Robert Baldyga4fca54a2013-10-09 09:00:02 +02001743 u32 mcval;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001744 u32 reg;
1745
1746 if (ep == 0) {
1747 /* EP0 is a special case */
1748 mpsval = s3c_hsotg_ep0_mps(mps);
1749 if (mpsval > 3)
1750 goto bad_mps;
Robert Baldygae9edd1992013-10-09 08:20:02 +02001751 hs_ep->ep.maxpacket = mps;
Robert Baldyga4fca54a2013-10-09 09:00:02 +02001752 hs_ep->mc = 1;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001753 } else {
Robert Baldygae9edd1992013-10-09 08:20:02 +02001754 mpsval = mps & DxEPCTL_MPS_MASK;
1755 if (mpsval > 1024)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001756 goto bad_mps;
Robert Baldyga4fca54a2013-10-09 09:00:02 +02001757 mcval = ((mps >> 11) & 0x3) + 1;
1758 hs_ep->mc = mcval;
1759 if (mcval > 3)
1760 goto bad_mps;
Robert Baldygae9edd1992013-10-09 08:20:02 +02001761 hs_ep->ep.maxpacket = mpsval;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001762 }
1763
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001764 /*
1765 * update both the in and out endpoint controldir_ registers, even
1766 * if one of the directions may not be in use.
1767 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001768
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001769 reg = readl(regs + DIEPCTL(ep));
1770 reg &= ~DxEPCTL_MPS_MASK;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001771 reg |= mpsval;
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001772 writel(reg, regs + DIEPCTL(ep));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001773
Anton Tikhomirov659ad602012-03-06 14:07:29 +09001774 if (ep) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001775 reg = readl(regs + DOEPCTL(ep));
1776 reg &= ~DxEPCTL_MPS_MASK;
Anton Tikhomirov659ad602012-03-06 14:07:29 +09001777 reg |= mpsval;
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001778 writel(reg, regs + DOEPCTL(ep));
Anton Tikhomirov659ad602012-03-06 14:07:29 +09001779 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001780
1781 return;
1782
1783bad_mps:
1784 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
1785}
1786
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09001787/**
1788 * s3c_hsotg_txfifo_flush - flush Tx FIFO
1789 * @hsotg: The driver state
1790 * @idx: The index for the endpoint (0..15)
1791 */
1792static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
1793{
1794 int timeout;
1795 int val;
1796
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001797 writel(GRSTCTL_TxFNum(idx) | GRSTCTL_TxFFlsh,
1798 hsotg->regs + GRSTCTL);
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09001799
1800 /* wait until the fifo is flushed */
1801 timeout = 100;
1802
1803 while (1) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001804 val = readl(hsotg->regs + GRSTCTL);
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09001805
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001806 if ((val & (GRSTCTL_TxFFlsh)) == 0)
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09001807 break;
1808
1809 if (--timeout == 0) {
1810 dev_err(hsotg->dev,
1811 "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
1812 __func__, val);
1813 }
1814
1815 udelay(1);
1816 }
1817}
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001818
1819/**
1820 * s3c_hsotg_trytx - check to see if anything needs transmitting
1821 * @hsotg: The driver state
1822 * @hs_ep: The driver endpoint to check.
1823 *
1824 * Check to see if there is a request that has data to send, and if so
1825 * make an attempt to write data into the FIFO.
1826 */
1827static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
1828 struct s3c_hsotg_ep *hs_ep)
1829{
1830 struct s3c_hsotg_req *hs_req = hs_ep->req;
1831
Robert Baldygaafcf4162013-09-19 11:50:19 +02001832 if (!hs_ep->dir_in || !hs_req) {
 1833		/*
 1834		 * if the request is not enqueued, we disable interrupts
 1835		 * for the endpoint, except for ep0
1836 */
1837 if (hs_ep->index != 0)
1838 s3c_hsotg_ctrl_epint(hsotg, hs_ep->index,
1839 hs_ep->dir_in, 0);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001840 return 0;
Robert Baldygaafcf4162013-09-19 11:50:19 +02001841 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001842
1843 if (hs_req->req.actual < hs_req->req.length) {
1844 dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
1845 hs_ep->index);
1846 return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1847 }
1848
1849 return 0;
1850}
1851
1852/**
1853 * s3c_hsotg_complete_in - complete IN transfer
1854 * @hsotg: The device state.
1855 * @hs_ep: The endpoint that has just completed.
1856 *
1857 * An IN transfer has been completed, update the transfer's state and then
1858 * call the relevant completion routines.
1859 */
1860static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
1861 struct s3c_hsotg_ep *hs_ep)
1862{
1863 struct s3c_hsotg_req *hs_req = hs_ep->req;
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001864 u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001865 int size_left, size_done;
1866
1867 if (!hs_req) {
1868 dev_dbg(hsotg->dev, "XferCompl but no req\n");
1869 return;
1870 }
1871
Lukasz Majewskid3ca0252012-05-04 14:17:04 +02001872 /* Finish ZLP handling for IN EP0 transactions */
1873 if (hsotg->eps[0].sent_zlp) {
1874 dev_dbg(hsotg->dev, "zlp packet received\n");
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02001875 s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
Lukasz Majewskid3ca0252012-05-04 14:17:04 +02001876 return;
1877 }
1878
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001879 /*
1880 * Calculate the size of the transfer by checking how much is left
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001881 * in the endpoint size register and then working it out from
1882 * the amount we loaded for the transfer.
1883 *
1884 * We do this even for DMA, as the transfer may have incremented
1885 * past the end of the buffer (DMA transfers are always 32bit
1886 * aligned).
1887 */
1888
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001889 size_left = DxEPTSIZ_XferSize_GET(epsize);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001890
1891 size_done = hs_ep->size_loaded - size_left;
1892 size_done += hs_ep->last_load;
1893
1894 if (hs_req->req.actual != size_done)
1895 dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
1896 __func__, hs_req->req.actual, size_done);
1897
1898 hs_req->req.actual = size_done;
Lukasz Majewskid3ca0252012-05-04 14:17:04 +02001899 dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
1900 hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001901
Lukasz Majewskid3ca0252012-05-04 14:17:04 +02001902 /*
 1903	 * Check if dealing with a Maximum Packet Size (MPS) IN transfer at EP0.
 1904	 * When the data sent is a multiple of the MPS size (e.g. 64B, 128B,
 1905	 * 192B, 256B, ...), send an IN ZLP packet after the last MPS-sized
 1906	 * packet to inform the host that no more data is available.
1907 * The state of req.zero member is checked to be sure that the value to
1908 * send is smaller than wValue expected from host.
1909 * Check req.length to NOT send another ZLP when the current one is
1910 * under completion (the one for which this completion has been called).
1911 */
1912 if (hs_req->req.length && hs_ep->index == 0 && hs_req->req.zero &&
1913 hs_req->req.length == hs_req->req.actual &&
1914 !(hs_req->req.length % hs_ep->ep.maxpacket)) {
1915
1916 dev_dbg(hsotg->dev, "ep0 zlp IN packet sent\n");
1917 s3c_hsotg_send_zlp(hsotg, hs_req);
1918
1919 return;
1920 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001921
1922 if (!size_left && hs_req->req.actual < hs_req->req.length) {
1923 dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
1924 s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1925 } else
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02001926 s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001927}
1928
1929/**
1930 * s3c_hsotg_epint - handle an in/out endpoint interrupt
1931 * @hsotg: The driver state
1932 * @idx: The index for the endpoint (0..15)
1933 * @dir_in: Set if this is an IN endpoint
1934 *
1935 * Process and clear any interrupt pending for an individual endpoint
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001936 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001937static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
1938 int dir_in)
1939{
1940 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001941 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
1942 u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
1943 u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001944 u32 ints;
Robert Baldyga1479e842013-10-09 08:41:57 +02001945 u32 ctrl;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001946
1947 ints = readl(hsotg->regs + epint_reg);
Robert Baldyga1479e842013-10-09 08:41:57 +02001948 ctrl = readl(hsotg->regs + epctl_reg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001949
Anton Tikhomirova3395f02011-04-21 17:06:39 +09001950 /* Clear endpoint interrupts */
1951 writel(ints, hsotg->regs + epint_reg);
1952
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001953 dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
1954 __func__, idx, dir_in ? "in" : "out", ints);
1955
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001956 if (ints & DxEPINT_XferCompl) {
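		/*
		 * for isochronous endpoints serviced every (micro)frame,
		 * toggle the even/odd frame bit so the next transfer is
		 * armed for the opposite frame parity
		 */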
Robert Baldyga1479e842013-10-09 08:41:57 +02001957 if (hs_ep->isochronous && hs_ep->interval == 1) {
1958 if (ctrl & DxEPCTL_EOFrNum)
1959 ctrl |= DxEPCTL_SetEvenFr;
1960 else
1961 ctrl |= DxEPCTL_SetOddFr;
1962 writel(ctrl, hsotg->regs + epctl_reg);
1963 }
1964
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001965 dev_dbg(hsotg->dev,
1966 "%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
1967 __func__, readl(hsotg->regs + epctl_reg),
1968 readl(hsotg->regs + epsiz_reg));
1969
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001970 /*
1971 * we get OutDone from the FIFO, so we only need to look
1972 * at completing IN requests here
1973 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001974 if (dir_in) {
1975 s3c_hsotg_complete_in(hsotg, hs_ep);
1976
Ben Dooksc9a64ea2010-07-19 09:40:46 +01001977 if (idx == 0 && !hs_ep->req)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001978 s3c_hsotg_enqueue_setup(hsotg);
1979 } else if (using_dma(hsotg)) {
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02001980 /*
1981 * We're using DMA, we need to fire an OutDone here
1982 * as we ignore the RXFIFO.
1983 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001984
1985 s3c_hsotg_handle_outdone(hsotg, idx, false);
1986 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001987 }
1988
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001989 if (ints & DxEPINT_EPDisbld) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001990 dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01001991
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09001992 if (dir_in) {
1993 int epctl = readl(hsotg->regs + epctl_reg);
1994
1995 s3c_hsotg_txfifo_flush(hsotg, idx);
1996
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02001997 if ((epctl & DxEPCTL_Stall) &&
1998 (epctl & DxEPCTL_EPType_Bulk)) {
1999 int dctl = readl(hsotg->regs + DCTL);
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002000
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002001 dctl |= DCTL_CGNPInNAK;
2002 writel(dctl, hsotg->regs + DCTL);
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002003 }
2004 }
2005 }
2006
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002007 if (ints & DxEPINT_AHBErr)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002008 dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002009
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002010 if (ints & DxEPINT_Setup) { /* Setup or Timeout */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002011 dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
2012
2013 if (using_dma(hsotg) && idx == 0) {
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002014 /*
 2015			 * this is the notification that we've received a
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002016			 * setup packet. In non-DMA mode we'd get this
 2017			 * from the RXFIFO; instead we need to process
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002018 * the setup here.
2019 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002020
2021 if (dir_in)
2022 WARN_ON_ONCE(1);
2023 else
2024 s3c_hsotg_handle_outdone(hsotg, 0, true);
2025 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002026 }
2027
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002028 if (ints & DxEPINT_Back2BackSetup)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002029 dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002030
Robert Baldyga1479e842013-10-09 08:41:57 +02002031 if (dir_in && !hs_ep->isochronous) {
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002032 /* not sure if this is important, but we'll clear it anyway */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002033 if (ints & DIEPMSK_INTknTXFEmpMsk) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002034 dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
2035 __func__, idx);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002036 }
2037
2038 /* this probably means something bad is happening */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002039 if (ints & DIEPMSK_INTknEPMisMsk) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002040 dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
2041 __func__, idx);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002042 }
Ben Dooks10aebc72010-07-19 09:40:44 +01002043
2044 /* FIFO has space or is empty (see GAHBCFG) */
2045 if (hsotg->dedicated_fifos &&
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002046 ints & DIEPMSK_TxFIFOEmpty) {
Ben Dooks10aebc72010-07-19 09:40:44 +01002047 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
2048 __func__, idx);
Anton Tikhomirov70fa0302012-03-06 14:08:29 +09002049 if (!using_dma(hsotg))
2050 s3c_hsotg_trytx(hsotg, hs_ep);
Ben Dooks10aebc72010-07-19 09:40:44 +01002051 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002052 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002053}
2054
2055/**
2056 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
2057 * @hsotg: The device state.
2058 *
2059 * Handle updating the device settings after the enumeration phase has
2060 * been completed.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002061 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002062static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
2063{
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002064 u32 dsts = readl(hsotg->regs + DSTS);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002065 int ep0_mps = 0, ep_mps;
2066
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002067 /*
2068 * This should signal the finish of the enumeration phase
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002069 * of the USB handshaking, so we should now know what rate
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002070 * we connected at.
2071 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002072
2073 dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
2074
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002075 /*
2076 * note, since we're limited by the size of transfer on EP0, and
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002077	 * it seems IN transfers must be an even number of packets, we do
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002078	 * not advertise a 64-byte MPS on EP0.
2079 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002080
2081 /* catch both EnumSpd_FS and EnumSpd_FS48 */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002082 switch (dsts & DSTS_EnumSpd_MASK) {
2083 case DSTS_EnumSpd_FS:
2084 case DSTS_EnumSpd_FS48:
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002085 hsotg->gadget.speed = USB_SPEED_FULL;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002086 ep0_mps = EP0_MPS_LIMIT;
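		/* 1023 is the largest packet size possible at full speed (isochronous) */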
Robert Baldyga295538f2013-12-06 13:03:44 +01002087 ep_mps = 1023;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002088 break;
2089
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002090 case DSTS_EnumSpd_HS:
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002091 hsotg->gadget.speed = USB_SPEED_HIGH;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002092 ep0_mps = EP0_MPS_LIMIT;
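		/* 1024 is the largest packet size possible at high speed */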
Robert Baldyga295538f2013-12-06 13:03:44 +01002093 ep_mps = 1024;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002094 break;
2095
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002096 case DSTS_EnumSpd_LS:
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002097 hsotg->gadget.speed = USB_SPEED_LOW;
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002098 /*
2099 * note, we don't actually support LS in this driver at the
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002100 * moment, and the documentation seems to imply that it isn't
2101 * supported by the PHYs on some of the devices.
2102 */
2103 break;
2104 }
Michal Nazarewicze538dfd2011-08-30 17:11:19 +02002105 dev_info(hsotg->dev, "new device is %s\n",
2106 usb_speed_string(hsotg->gadget.speed));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002107
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002108 /*
2109 * we should now know the maximum packet size for an
2110 * endpoint, so set the endpoints to a default value.
2111 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002112
2113 if (ep0_mps) {
2114 int i;
2115 s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002116 for (i = 1; i < hsotg->num_of_eps; i++)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002117 s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
2118 }
2119
2120 /* ensure after enumeration our EP0 is active */
2121
2122 s3c_hsotg_enqueue_setup(hsotg);
2123
2124 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002125 readl(hsotg->regs + DIEPCTL0),
2126 readl(hsotg->regs + DOEPCTL0));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002127}
2128
2129/**
2130 * kill_all_requests - remove all requests from the endpoint's queue
2131 * @hsotg: The device state.
2132 * @ep: The endpoint the requests may be on.
2133 * @result: The result code to use.
2134 * @force: Force removal of any current requests
2135 *
2136 * Go through the requests on the given endpoint and mark them
2137 * completed with the given result code.
2138 */
2139static void kill_all_requests(struct s3c_hsotg *hsotg,
2140 struct s3c_hsotg_ep *ep,
2141 int result, bool force)
2142{
2143 struct s3c_hsotg_req *req, *treq;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002144
2145 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002146 /*
2147 * currently, we can't do much about an already
2148 * running request on an in endpoint
2149 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002150
2151 if (ep->req == req && ep->dir_in && !force)
2152 continue;
2153
2154 s3c_hsotg_complete_request(hsotg, ep, req,
2155 result);
2156 }
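	/*
	 * DTXFSTS reports the TxFIFO space free in 32-bit words; if less
	 * than 3072 bytes (presumably the full FIFO size) are free, the
	 * FIFO still holds stale data and is flushed
	 */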
Robert Baldygab963a812013-12-06 13:03:45 +01002157	if (hsotg->dedicated_fifos)
2158 if ((readl(hsotg->regs + DTXFSTS(ep->index)) & 0xffff) * 4 < 3072)
2159 s3c_hsotg_txfifo_flush(hsotg, ep->index);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002160}
2161
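/*
 * call_gadget - invoke the named gadget driver callback, dropping the
 * device lock around the call since the callback may re-enter the driver
 * (for example to queue further requests)
 */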
2162#define call_gadget(_hs, _entry) \
Pavel Macheka023da32013-09-30 14:56:02 +02002163do { \
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002164 if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02002165 (_hs)->driver && (_hs)->driver->_entry) { \
2166 spin_unlock(&_hs->lock); \
2167 (_hs)->driver->_entry(&(_hs)->gadget); \
2168 spin_lock(&_hs->lock); \
Pavel Macheka023da32013-09-30 14:56:02 +02002169 } \
2170} while (0)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002171
2172/**
Lukasz Majewski5e891342012-05-04 14:17:07 +02002173 * s3c_hsotg_disconnect - disconnect service
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002174 * @hsotg: The device state.
2175 *
Lukasz Majewski5e891342012-05-04 14:17:07 +02002176 * The device has been disconnected. Remove all current
2177 * transactions and signal the gadget driver that this
2178 * has happened.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002179 */
Lukasz Majewski5e891342012-05-04 14:17:07 +02002180static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002181{
2182 unsigned ep;
2183
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002184 for (ep = 0; ep < hsotg->num_of_eps; ep++)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002185 kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
2186
2187 call_gadget(hsotg, disconnect);
2188}
2189
2190/**
2191 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
2192 * @hsotg: The device state:
2193 * @periodic: True if this is a periodic FIFO interrupt
2194 */
2195static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
2196{
2197 struct s3c_hsotg_ep *ep;
2198 int epno, ret;
2199
2200 /* look through for any more data to transmit */
2201
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002202 for (epno = 0; epno < hsotg->num_of_eps; epno++) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002203 ep = &hsotg->eps[epno];
2204
2205 if (!ep->dir_in)
2206 continue;
2207
2208 if ((periodic && !ep->periodic) ||
2209 (!periodic && ep->periodic))
2210 continue;
2211
2212 ret = s3c_hsotg_trytx(hsotg, ep);
2213 if (ret < 0)
2214 break;
2215 }
2216}
2217
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002218/* IRQ flags which will trigger a retry around the IRQ loop */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002219#define IRQ_RETRY_MASK (GINTSTS_NPTxFEmp | \
2220 GINTSTS_PTxFEmp | \
2221 GINTSTS_RxFLvl)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002222
2223/**
Lukasz Majewski308d7342012-05-04 14:17:05 +02002224 * s3c_hsotg_corereset - issue softreset to the core
2225 * @hsotg: The device state
2226 *
2227 * Issue a soft reset to the core, and await the core finishing it.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002228 */
Lukasz Majewski308d7342012-05-04 14:17:05 +02002229static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
2230{
2231 int timeout;
2232 u32 grstctl;
2233
2234 dev_dbg(hsotg->dev, "resetting core\n");
2235
2236 /* issue soft reset */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002237 writel(GRSTCTL_CSftRst, hsotg->regs + GRSTCTL);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002238
Du, Changbin2868fea2012-07-24 08:19:25 +08002239 timeout = 10000;
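	/* the core clears GRSTCTL_CSftRst once the soft reset has completed */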
Lukasz Majewski308d7342012-05-04 14:17:05 +02002240 do {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002241 grstctl = readl(hsotg->regs + GRSTCTL);
2242 } while ((grstctl & GRSTCTL_CSftRst) && timeout-- > 0);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002243
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002244 if (grstctl & GRSTCTL_CSftRst) {
Lukasz Majewski308d7342012-05-04 14:17:05 +02002245 dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
2246 return -EINVAL;
2247 }
2248
Du, Changbin2868fea2012-07-24 08:19:25 +08002249 timeout = 10000;
Lukasz Majewski308d7342012-05-04 14:17:05 +02002250
2251 while (1) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002252 u32 grstctl = readl(hsotg->regs + GRSTCTL);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002253
2254 if (timeout-- < 0) {
2255 dev_info(hsotg->dev,
2256 "%s: reset failed, GRSTCTL=%08x\n",
2257 __func__, grstctl);
2258 return -ETIMEDOUT;
2259 }
2260
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002261 if (!(grstctl & GRSTCTL_AHBIdle))
Lukasz Majewski308d7342012-05-04 14:17:05 +02002262 continue;
2263
2264 break; /* reset done */
2265 }
2266
2267 dev_dbg(hsotg->dev, "reset successful\n");
2268 return 0;
2269}
2270
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002271/**
 2272 * s3c_hsotg_core_init - reset and initialise the core
 2273 * @hsotg: The device state
 2274 *
 2275 * Reset the core and then program it ready for device (gadget) operation.
2276 */
Lukasz Majewski308d7342012-05-04 14:17:05 +02002277static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
2278{
2279 s3c_hsotg_corereset(hsotg);
2280
2281 /*
2282 * we must now enable ep0 ready for host detection and then
2283 * set configuration.
2284 */
2285
2286 /* set the PLL on, remove the HNP/SRP and set the PHY */
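	/*
	 * the (0x5 << 10) value presumably programs the USB turnaround
	 * time (USBTrdTim) field of GUSBCFG for the 16-bit PHY interface
	 */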
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002287 writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) |
2288 (0x5 << 10), hsotg->regs + GUSBCFG);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002289
2290 s3c_hsotg_init_fifo(hsotg);
2291
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002292 __orr32(hsotg->regs + DCTL, DCTL_SftDiscon);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002293
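	/*
	 * the (1 << 18) presumably sets the IN endpoint mismatch count
	 * (EPMisCnt) field of DCFG to 1
	 */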
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002294 writel(1 << 18 | DCFG_DevSpd_HS, hsotg->regs + DCFG);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002295
2296 /* Clear any pending OTG interrupts */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002297 writel(0xffffffff, hsotg->regs + GOTGINT);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002298
2299 /* Clear any pending interrupts */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002300 writel(0xffffffff, hsotg->regs + GINTSTS);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002301
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002302 writel(GINTSTS_ErlySusp | GINTSTS_SessReqInt |
2303 GINTSTS_GOUTNakEff | GINTSTS_GINNakEff |
2304 GINTSTS_ConIDStsChng | GINTSTS_USBRst |
2305 GINTSTS_EnumDone | GINTSTS_OTGInt |
2306 GINTSTS_USBSusp | GINTSTS_WkUpInt,
2307 hsotg->regs + GINTMSK);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002308
2309 if (using_dma(hsotg))
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002310 writel(GAHBCFG_GlblIntrEn | GAHBCFG_DMAEn |
2311 GAHBCFG_HBstLen_Incr4,
2312 hsotg->regs + GAHBCFG);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002313 else
Robert Baldyga8acc8292013-09-19 11:50:23 +02002314 writel(((hsotg->dedicated_fifos) ? (GAHBCFG_NPTxFEmpLvl |
2315 GAHBCFG_PTxFEmpLvl) : 0) |
2316 GAHBCFG_GlblIntrEn,
2317 hsotg->regs + GAHBCFG);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002318
2319 /*
Robert Baldyga8acc8292013-09-19 11:50:23 +02002320 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
 2321	 * when we have no data to transfer. Otherwise we end up being flooded by
2322 * interrupts.
Lukasz Majewski308d7342012-05-04 14:17:05 +02002323 */
2324
Robert Baldyga8acc8292013-09-19 11:50:23 +02002325 writel(((hsotg->dedicated_fifos) ? DIEPMSK_TxFIFOEmpty |
2326 DIEPMSK_INTknTXFEmpMsk : 0) |
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002327 DIEPMSK_EPDisbldMsk | DIEPMSK_XferComplMsk |
2328 DIEPMSK_TimeOUTMsk | DIEPMSK_AHBErrMsk |
2329 DIEPMSK_INTknEPMisMsk,
2330 hsotg->regs + DIEPMSK);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002331
2332 /*
2333 * don't need XferCompl, we get that from RXFIFO in slave mode. In
2334 * DMA mode we may need this.
2335 */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002336 writel((using_dma(hsotg) ? (DIEPMSK_XferComplMsk |
2337 DIEPMSK_TimeOUTMsk) : 0) |
2338 DOEPMSK_EPDisbldMsk | DOEPMSK_AHBErrMsk |
2339 DOEPMSK_SetupMsk,
2340 hsotg->regs + DOEPMSK);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002341
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002342 writel(0, hsotg->regs + DAINTMSK);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002343
2344 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002345 readl(hsotg->regs + DIEPCTL0),
2346 readl(hsotg->regs + DOEPCTL0));
Lukasz Majewski308d7342012-05-04 14:17:05 +02002347
2348 /* enable in and out endpoint interrupts */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002349 s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPInt | GINTSTS_IEPInt);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002350
2351 /*
2352 * Enable the RXFIFO when in slave mode, as this is how we collect
2353 * the data. In DMA mode, we get events from the FIFO but also
2354 * things we cannot process, so do not use it.
2355 */
2356 if (!using_dma(hsotg))
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002357 s3c_hsotg_en_gsint(hsotg, GINTSTS_RxFLvl);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002358
2359 /* Enable interrupts for EP0 in and out */
2360 s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
2361 s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
2362
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002363 __orr32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002364 udelay(10); /* see openiboot */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002365 __bic32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002366
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002367 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));
Lukasz Majewski308d7342012-05-04 14:17:05 +02002368
2369 /*
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002370 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
Lukasz Majewski308d7342012-05-04 14:17:05 +02002371 * writing to the EPCTL register..
2372 */
2373
2374 /* set to read 1 8byte packet */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002375 writel(DxEPTSIZ_MC(1) | DxEPTSIZ_PktCnt(1) |
2376 DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002377
2378 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002379 DxEPCTL_CNAK | DxEPCTL_EPEna |
2380 DxEPCTL_USBActEp,
2381 hsotg->regs + DOEPCTL0);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002382
2383 /* enable, but don't activate EP0in */
2384 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002385 DxEPCTL_USBActEp, hsotg->regs + DIEPCTL0);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002386
2387 s3c_hsotg_enqueue_setup(hsotg);
2388
2389 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002390 readl(hsotg->regs + DIEPCTL0),
2391 readl(hsotg->regs + DOEPCTL0));
Lukasz Majewski308d7342012-05-04 14:17:05 +02002392
2393 /* clear global NAKs */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002394 writel(DCTL_CGOUTNak | DCTL_CGNPInNAK,
2395 hsotg->regs + DCTL);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002396
 2397	/* must be at least 3ms to allow the bus to see the disconnect */
2398 mdelay(3);
2399
2400 /* remove the soft-disconnect and let's go */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002401 __bic32(hsotg->regs + DCTL, DCTL_SftDiscon);
Lukasz Majewski308d7342012-05-04 14:17:05 +02002402}
2403
2404/**
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002405 * s3c_hsotg_irq - handle device interrupt
2406 * @irq: The IRQ number triggered
2407 * @pw: The pw value when registered the handler.
2408 */
2409static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
2410{
2411 struct s3c_hsotg *hsotg = pw;
2412 int retry_count = 8;
2413 u32 gintsts;
2414 u32 gintmsk;
2415
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02002416 spin_lock(&hsotg->lock);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002417irq_retry:
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002418 gintsts = readl(hsotg->regs + GINTSTS);
2419 gintmsk = readl(hsotg->regs + GINTMSK);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002420
2421 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
2422 __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
2423
2424 gintsts &= gintmsk;
2425
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002426 if (gintsts & GINTSTS_OTGInt) {
2427 u32 otgint = readl(hsotg->regs + GOTGINT);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002428
2429 dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
2430
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002431 writel(otgint, hsotg->regs + GOTGINT);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002432 }
2433
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002434 if (gintsts & GINTSTS_SessReqInt) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002435 dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002436 writel(GINTSTS_SessReqInt, hsotg->regs + GINTSTS);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002437 }
2438
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002439 if (gintsts & GINTSTS_EnumDone) {
2440 writel(GINTSTS_EnumDone, hsotg->regs + GINTSTS);
Anton Tikhomirova3395f02011-04-21 17:06:39 +09002441
2442 s3c_hsotg_irq_enumdone(hsotg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002443 }
2444
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002445 if (gintsts & GINTSTS_ConIDStsChng) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002446		dev_dbg(hsotg->dev, "ConIDStsChng (DSTS=0x%08x, GOTGCTL=%08x)\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002447 readl(hsotg->regs + DSTS),
2448 readl(hsotg->regs + GOTGCTL));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002449
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002450 writel(GINTSTS_ConIDStsChng, hsotg->regs + GINTSTS);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002451 }
2452
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002453 if (gintsts & (GINTSTS_OEPInt | GINTSTS_IEPInt)) {
2454 u32 daint = readl(hsotg->regs + DAINT);
Robert Baldyga7e804652013-09-19 11:50:20 +02002455 u32 daintmsk = readl(hsotg->regs + DAINTMSK);
2456 u32 daint_out, daint_in;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002457 int ep;
2458
Robert Baldyga7e804652013-09-19 11:50:20 +02002459 daint &= daintmsk;
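		/*
		 * DAINT holds the IN endpoint interrupt bits in its lower
		 * half and the OUT endpoint bits in its upper half
		 */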
2460 daint_out = daint >> DAINT_OutEP_SHIFT;
2461 daint_in = daint & ~(daint_out << DAINT_OutEP_SHIFT);
2462
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002463 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
2464
2465 for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
2466 if (daint_out & 1)
2467 s3c_hsotg_epint(hsotg, ep, 0);
2468 }
2469
2470 for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
2471 if (daint_in & 1)
2472 s3c_hsotg_epint(hsotg, ep, 1);
2473 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002474 }
2475
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002476 if (gintsts & GINTSTS_USBRst) {
Lukasz Majewski12a1f4d2012-05-04 14:17:08 +02002477
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002478 u32 usb_status = readl(hsotg->regs + GOTGCTL);
Lukasz Majewski12a1f4d2012-05-04 14:17:08 +02002479
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002480 dev_info(hsotg->dev, "%s: USBRst\n", __func__);
2481 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002482 readl(hsotg->regs + GNPTXSTS));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002483
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002484 writel(GINTSTS_USBRst, hsotg->regs + GINTSTS);
Anton Tikhomirova3395f02011-04-21 17:06:39 +09002485
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002486 if (usb_status & GOTGCTL_BSESVLD) {
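			/*
			 * only kill requests and re-initialise the core if
			 * more than 200ms has passed since the last reset,
			 * so a burst of closely spaced resets is handled once
			 */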
Lukasz Majewski12a1f4d2012-05-04 14:17:08 +02002487 if (time_after(jiffies, hsotg->last_rst +
2488 msecs_to_jiffies(200))) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002489
Lukasz Majewski12a1f4d2012-05-04 14:17:08 +02002490 kill_all_requests(hsotg, &hsotg->eps[0],
2491 -ECONNRESET, true);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002492
Lukasz Majewski12a1f4d2012-05-04 14:17:08 +02002493 s3c_hsotg_core_init(hsotg);
2494 hsotg->last_rst = jiffies;
2495 }
2496 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002497 }
2498
2499 /* check both FIFOs */
2500
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002501 if (gintsts & GINTSTS_NPTxFEmp) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002502 dev_dbg(hsotg->dev, "NPTxFEmp\n");
2503
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002504 /*
2505 * Disable the interrupt to stop it happening again
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002506 * unless one of these endpoint routines decides that
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002507 * it needs re-enabling
2508 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002509
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002510 s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTxFEmp);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002511 s3c_hsotg_irq_fifoempty(hsotg, false);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002512 }
2513
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002514 if (gintsts & GINTSTS_PTxFEmp) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002515 dev_dbg(hsotg->dev, "PTxFEmp\n");
2516
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002517 /* See note in GINTSTS_NPTxFEmp */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002518
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002519 s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTxFEmp);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002520 s3c_hsotg_irq_fifoempty(hsotg, true);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002521 }
2522
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002523 if (gintsts & GINTSTS_RxFLvl) {
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002524 /*
2525 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002526 * we need to retry s3c_hsotg_handle_rx if this is still
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002527 * set.
2528 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002529
2530 s3c_hsotg_handle_rx(hsotg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002531 }
2532
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002533 if (gintsts & GINTSTS_ModeMis) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002534 dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002535 writel(GINTSTS_ModeMis, hsotg->regs + GINTSTS);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002536 }
2537
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002538 if (gintsts & GINTSTS_USBSusp) {
2539 dev_info(hsotg->dev, "GINTSTS_USBSusp\n");
2540 writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002541
2542 call_gadget(hsotg, suspend);
2543 }
2544
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002545 if (gintsts & GINTSTS_WkUpInt) {
 2546		dev_info(hsotg->dev, "GINTSTS_WkUpInt\n");
2547 writel(GINTSTS_WkUpInt, hsotg->regs + GINTSTS);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002548
2549 call_gadget(hsotg, resume);
2550 }
2551
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002552 if (gintsts & GINTSTS_ErlySusp) {
2553 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
2554 writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002555 }
2556
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002557 /*
 2558	 * these next two seem to crop up occasionally, causing the core
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002559	 * to shut down the USB transfer, so try clearing them and logging
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002560 * the occurrence.
2561 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002562
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002563 if (gintsts & GINTSTS_GOUTNakEff) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002564 dev_info(hsotg->dev, "GOUTNakEff triggered\n");
2565
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002566 writel(DCTL_CGOUTNak, hsotg->regs + DCTL);
Anton Tikhomirova3395f02011-04-21 17:06:39 +09002567
2568 s3c_hsotg_dump(hsotg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002569 }
2570
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002571 if (gintsts & GINTSTS_GINNakEff) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002572 dev_info(hsotg->dev, "GINNakEff triggered\n");
2573
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002574 writel(DCTL_CGNPInNAK, hsotg->regs + DCTL);
Anton Tikhomirova3395f02011-04-21 17:06:39 +09002575
2576 s3c_hsotg_dump(hsotg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002577 }
2578
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002579 /*
2580 * if we've had fifo events, we should try and go around the
2581 * loop again to see if there's any point in returning yet.
2582 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002583
2584 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
2585 goto irq_retry;
2586
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02002587 spin_unlock(&hsotg->lock);
2588
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002589 return IRQ_HANDLED;
2590}
2591
2592/**
2593 * s3c_hsotg_ep_enable - enable the given endpoint
 2594 * @ep: The USB endpoint to configure
2595 * @desc: The USB endpoint descriptor to configure with.
2596 *
2597 * This is called from the USB gadget code's usb_ep_enable().
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002598 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002599static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2600 const struct usb_endpoint_descriptor *desc)
2601{
2602 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2603 struct s3c_hsotg *hsotg = hs_ep->parent;
2604 unsigned long flags;
2605 int index = hs_ep->index;
2606 u32 epctrl_reg;
2607 u32 epctrl;
2608 u32 mps;
2609 int dir_in;
Julia Lawall19c190f2010-03-29 17:36:44 +02002610 int ret = 0;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002611
2612 dev_dbg(hsotg->dev,
2613 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
2614 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
2615 desc->wMaxPacketSize, desc->bInterval);
2616
2617 /* not to be called for EP0 */
2618 WARN_ON(index == 0);
2619
2620 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
2621 if (dir_in != hs_ep->dir_in) {
2622 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
2623 return -EINVAL;
2624 }
2625
Kuninori Morimoto29cc8892011-08-23 03:12:03 -07002626 mps = usb_endpoint_maxp(desc);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002627
2628 /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
2629
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002630 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002631 epctrl = readl(hsotg->regs + epctrl_reg);
2632
2633 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
2634 __func__, epctrl, epctrl_reg);
2635
Lukasz Majewski22258f42012-06-14 10:02:24 +02002636 spin_lock_irqsave(&hsotg->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002637
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002638 epctrl &= ~(DxEPCTL_EPType_MASK | DxEPCTL_MPS_MASK);
2639 epctrl |= DxEPCTL_MPS(mps);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002640
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002641 /*
2642 * mark the endpoint as active, otherwise the core may ignore
2643 * transactions entirely for this endpoint
2644 */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002645 epctrl |= DxEPCTL_USBActEp;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002646
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002647 /*
2648 * set the NAK status on the endpoint, otherwise we might try and
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002649	 * do something with data that we've not yet got a request to process
2650 * since the RXFIFO will take data for an endpoint even if the
2651 * size register hasn't been set.
2652 */
2653
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002654 epctrl |= DxEPCTL_SNAK;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002655
2656 /* update the endpoint state */
Robert Baldygae9edd1992013-10-09 08:20:02 +02002657 s3c_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002658
2659 /* default, set to non-periodic */
Robert Baldyga1479e842013-10-09 08:41:57 +02002660 hs_ep->isochronous = 0;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002661 hs_ep->periodic = 0;
Robert Baldygaa18ed7b2013-09-19 11:50:21 +02002662 hs_ep->halted = 0;
Robert Baldyga1479e842013-10-09 08:41:57 +02002663 hs_ep->interval = desc->bInterval;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002664
Robert Baldyga4fca54a2013-10-09 09:00:02 +02002665 if (hs_ep->interval > 1 && hs_ep->mc > 1)
2666 dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");
2667
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002668 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
2669 case USB_ENDPOINT_XFER_ISOC:
Robert Baldyga1479e842013-10-09 08:41:57 +02002670 epctrl |= DxEPCTL_EPType_Iso;
2671 epctrl |= DxEPCTL_SetEvenFr;
2672 hs_ep->isochronous = 1;
2673 if (dir_in)
2674 hs_ep->periodic = 1;
2675 break;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002676
2677 case USB_ENDPOINT_XFER_BULK:
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002678 epctrl |= DxEPCTL_EPType_Bulk;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002679 break;
2680
2681 case USB_ENDPOINT_XFER_INT:
2682 if (dir_in) {
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002683 /*
2684 * Allocate our TxFNum by simply using the index
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002685 * of the endpoint for the moment. We could do
2686 * something better if the host indicates how
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002687 * many FIFOs we are expecting to use.
2688 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002689
2690 hs_ep->periodic = 1;
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002691 epctrl |= DxEPCTL_TxFNum(index);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002692 }
2693
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002694 epctrl |= DxEPCTL_EPType_Intterupt;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002695 break;
2696
2697 case USB_ENDPOINT_XFER_CONTROL:
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002698 epctrl |= DxEPCTL_EPType_Control;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002699 break;
2700 }
2701
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002702 /*
2703 * if the hardware has dedicated fifos, we must give each IN EP
Ben Dooks10aebc72010-07-19 09:40:44 +01002704 * a unique tx-fifo even if it is non-periodic.
2705 */
2706 if (dir_in && hsotg->dedicated_fifos)
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002707 epctrl |= DxEPCTL_TxFNum(index);
Ben Dooks10aebc72010-07-19 09:40:44 +01002708
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002709 /* for non control endpoints, set PID to D0 */
2710 if (index)
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002711 epctrl |= DxEPCTL_SetD0PID;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002712
2713 dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
2714 __func__, epctrl);
2715
2716 writel(epctrl, hsotg->regs + epctrl_reg);
2717 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
2718 __func__, readl(hsotg->regs + epctrl_reg));
2719
2720 /* enable the endpoint interrupt */
2721 s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2722
Lukasz Majewski22258f42012-06-14 10:02:24 +02002723 spin_unlock_irqrestore(&hsotg->lock, flags);
Julia Lawall19c190f2010-03-29 17:36:44 +02002724 return ret;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002725}
2726
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002727/**
2728 * s3c_hsotg_ep_disable - disable given endpoint
2729 * @ep: The endpoint to disable.
2730 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002731static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2732{
2733 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2734 struct s3c_hsotg *hsotg = hs_ep->parent;
2735 int dir_in = hs_ep->dir_in;
2736 int index = hs_ep->index;
2737 unsigned long flags;
2738 u32 epctrl_reg;
2739 u32 ctrl;
2740
2741 dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
2742
2743 if (ep == &hsotg->eps[0].ep) {
2744 dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
2745 return -EINVAL;
2746 }
2747
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002748 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002749
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02002750 spin_lock_irqsave(&hsotg->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002751 /* terminate all requests with shutdown */
2752 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
2753
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002754
2755 ctrl = readl(hsotg->regs + epctrl_reg);
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002756 ctrl &= ~DxEPCTL_EPEna;
2757 ctrl &= ~DxEPCTL_USBActEp;
2758 ctrl |= DxEPCTL_SNAK;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002759
2760 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
2761 writel(ctrl, hsotg->regs + epctrl_reg);
2762
2763 /* disable endpoint interrupts */
2764 s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
2765
Lukasz Majewski22258f42012-06-14 10:02:24 +02002766 spin_unlock_irqrestore(&hsotg->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002767 return 0;
2768}
2769
2770/**
 2771 * on_list - check whether a request is on the given endpoint's queue
 2772 * @ep: The endpoint to check.
 2773 * @test: The request to test for presence on the endpoint's queue.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002774 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002775static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
2776{
2777 struct s3c_hsotg_req *req, *treq;
2778
2779 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2780 if (req == test)
2781 return true;
2782 }
2783
2784 return false;
2785}
2786
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002787/**
 2788 * s3c_hsotg_ep_dequeue - dequeue the given request from an endpoint
 2789 * @ep: The endpoint to dequeue the request from.
 2790 * @req: The request to be removed from the endpoint's queue.
2791 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002792static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2793{
2794 struct s3c_hsotg_req *hs_req = our_req(req);
2795 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2796 struct s3c_hsotg *hs = hs_ep->parent;
2797 unsigned long flags;
2798
2799 dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
2800
Lukasz Majewski22258f42012-06-14 10:02:24 +02002801 spin_lock_irqsave(&hs->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002802
2803 if (!on_list(hs_ep, hs_req)) {
Lukasz Majewski22258f42012-06-14 10:02:24 +02002804 spin_unlock_irqrestore(&hs->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002805 return -EINVAL;
2806 }
2807
2808 s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
Lukasz Majewski22258f42012-06-14 10:02:24 +02002809 spin_unlock_irqrestore(&hs->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002810
2811 return 0;
2812}
2813
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002814/**
2815 * s3c_hsotg_ep_sethalt - set halt on a given endpoint
2816 * @ep: The endpoint to set halt.
2817 * @value: Set or unset the halt.
2818 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002819static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2820{
2821 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2822 struct s3c_hsotg *hs = hs_ep->parent;
2823 int index = hs_ep->index;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002824 u32 epreg;
2825 u32 epctl;
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002826 u32 xfertype;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002827
2828 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
2829
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002830 /* write both IN and OUT control registers */
2831
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002832 epreg = DIEPCTL(index);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002833 epctl = readl(hs->regs + epreg);
2834
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002835 if (value) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002836		epctl |= DxEPCTL_Stall | DxEPCTL_SNAK;
2837 if (epctl & DxEPCTL_EPEna)
2838 epctl |= DxEPCTL_EPDis;
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002839 } else {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002840 epctl &= ~DxEPCTL_Stall;
2841 xfertype = epctl & DxEPCTL_EPType_MASK;
2842 if (xfertype == DxEPCTL_EPType_Bulk ||
2843 xfertype == DxEPCTL_EPType_Intterupt)
2844 epctl |= DxEPCTL_SetD0PID;
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002845 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002846
2847 writel(epctl, hs->regs + epreg);
2848
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002849 epreg = DOEPCTL(index);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002850 epctl = readl(hs->regs + epreg);
2851
2852 if (value)
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002853 epctl |= DxEPCTL_Stall;
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002854 else {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002855 epctl &= ~DxEPCTL_Stall;
2856 xfertype = epctl & DxEPCTL_EPType_MASK;
2857 if (xfertype == DxEPCTL_EPType_Bulk ||
2858 xfertype == DxEPCTL_EPType_Intterupt)
2859 epctl |= DxEPCTL_SetD0PID;
Anton Tikhomirov9c39ddc2011-04-21 17:06:41 +09002860 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002861
2862 writel(epctl, hs->regs + epreg);
2863
Robert Baldygaa18ed7b2013-09-19 11:50:21 +02002864 hs_ep->halted = value;
2865
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002866 return 0;
2867}
2868
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02002869/**
2870 * s3c_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
2871 * @ep: The endpoint to set halt.
2872 * @value: Set or unset the halt.
2873 */
2874static int s3c_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
2875{
2876 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2877 struct s3c_hsotg *hs = hs_ep->parent;
2878 unsigned long flags = 0;
2879 int ret = 0;
2880
2881 spin_lock_irqsave(&hs->lock, flags);
2882 ret = s3c_hsotg_ep_sethalt(ep, value);
2883 spin_unlock_irqrestore(&hs->lock, flags);
2884
2885 return ret;
2886}
2887
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002888static struct usb_ep_ops s3c_hsotg_ep_ops = {
2889 .enable = s3c_hsotg_ep_enable,
2890 .disable = s3c_hsotg_ep_disable,
2891 .alloc_request = s3c_hsotg_ep_alloc_request,
2892 .free_request = s3c_hsotg_ep_free_request,
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02002893 .queue = s3c_hsotg_ep_queue_lock,
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002894 .dequeue = s3c_hsotg_ep_dequeue,
Lukasz Majewski5ad1d312012-06-14 10:02:26 +02002895 .set_halt = s3c_hsotg_ep_sethalt_lock,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002896 /* note, don't believe we have any call for the fifo routines */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002897};
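/*
 * For reference, a gadget function driver does not call the ops above
 * directly; it goes through the generic endpoint API, which roughly maps
 * as follows (illustrative sketch only):
 *
 *	usb_ep_enable()    -> .enable    (s3c_hsotg_ep_enable)
 *	usb_ep_disable()   -> .disable   (s3c_hsotg_ep_disable)
 *	usb_ep_queue()     -> .queue     (s3c_hsotg_ep_queue_lock)
 *	usb_ep_dequeue()   -> .dequeue   (s3c_hsotg_ep_dequeue)
 *	usb_ep_set_halt()  -> .set_halt  (s3c_hsotg_ep_sethalt_lock)
 */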
2898
2899/**
Lukasz Majewski41188782012-05-04 14:17:01 +02002900 * s3c_hsotg_phy_enable - enable platform phy dev
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002901 * @hsotg: The driver state
Lukasz Majewski41188782012-05-04 14:17:01 +02002902 *
 2903 * A wrapper around the platform code responsible for controlling
 2904 * the low-level USB PHY.
2905 */
2906static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
2907{
2908 struct platform_device *pdev = to_platform_device(hsotg->dev);
2909
2910 dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
Praveen Panerib2e587d2012-11-14 15:57:16 +05302911
2912 if (hsotg->phy)
2913 usb_phy_init(hsotg->phy);
2914 else if (hsotg->plat->phy_init)
Lukasz Majewski41188782012-05-04 14:17:01 +02002915 hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
2916}
2917
2918/**
2919 * s3c_hsotg_phy_disable - disable platform phy dev
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002920 * @hsotg: The driver state
Lukasz Majewski41188782012-05-04 14:17:01 +02002921 *
 2922 * A wrapper around the platform code responsible for controlling
 2923 * the low-level USB PHY.
2924 */
2925static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
2926{
2927 struct platform_device *pdev = to_platform_device(hsotg->dev);
2928
Praveen Panerib2e587d2012-11-14 15:57:16 +05302929 if (hsotg->phy)
2930 usb_phy_shutdown(hsotg->phy);
2931 else if (hsotg->plat->phy_exit)
Lukasz Majewski41188782012-05-04 14:17:01 +02002932 hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
2933}
2934
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002935/**
 2936 * s3c_hsotg_init - initialize the usb core
2937 * @hsotg: The driver state
2938 */
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002939static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
2940{
2941 /* unmask subset of endpoint interrupts */
2942
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002943 writel(DIEPMSK_TimeOUTMsk | DIEPMSK_AHBErrMsk |
2944 DIEPMSK_EPDisbldMsk | DIEPMSK_XferComplMsk,
2945 hsotg->regs + DIEPMSK);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002946
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002947 writel(DOEPMSK_SetupMsk | DOEPMSK_AHBErrMsk |
2948 DOEPMSK_EPDisbldMsk | DOEPMSK_XferComplMsk,
2949 hsotg->regs + DOEPMSK);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002950
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002951 writel(0, hsotg->regs + DAINTMSK);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002952
2953 /* Be in disconnected state until gadget is registered */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002954 __orr32(hsotg->regs + DCTL, DCTL_SftDiscon);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002955
2956 if (0) {
2957 /* post global nak until we're ready */
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002958 writel(DCTL_SGNPInNAK | DCTL_SGOUTNak,
2959 hsotg->regs + DCTL);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002960 }
2961
2962 /* setup fifos */
2963
2964 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002965 readl(hsotg->regs + GRXFSIZ),
2966 readl(hsotg->regs + GNPTXFSIZ));
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002967
2968 s3c_hsotg_init_fifo(hsotg);
2969
2970 /* set the PLL on, remove the HNP/SRP and set the PHY */
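	/*
	 * The (0x5 << 10) below is presumably the GUSBCFG USBTrdTim field,
	 * i.e. the USB turnaround time that pairs with the 16-bit UTMI+
	 * interface selected by GUSBCFG_PHYIf16.
	 */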
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002971 writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) | (0x5 << 10),
2972 hsotg->regs + GUSBCFG);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002973
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02002974 writel(using_dma(hsotg) ? GAHBCFG_DMAEn : 0x0,
2975 hsotg->regs + GAHBCFG);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02002976}
2977
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02002978/**
2979 * s3c_hsotg_udc_start - prepare the udc for work
2980 * @gadget: The usb gadget state
2981 * @driver: The usb gadget driver
2982 *
 2983 * Perform the initialization needed to prepare the udc device and
 2984 * driver for operation.
2985 */
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02002986static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
2987 struct usb_gadget_driver *driver)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002988{
Lukasz Majewskif99b2bf2012-05-04 14:17:12 +02002989 struct s3c_hsotg *hsotg = to_hsotg(gadget);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002990 int ret;
2991
2992 if (!hsotg) {
Pavel Macheka023da32013-09-30 14:56:02 +02002993 pr_err("%s: called with no device\n", __func__);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01002994 return -ENODEV;
2995 }
2996
2997 if (!driver) {
2998 dev_err(hsotg->dev, "%s: no driver\n", __func__);
2999 return -EINVAL;
3000 }
3001
Michal Nazarewicz7177aed2011-11-19 18:27:38 +01003002 if (driver->max_speed < USB_SPEED_FULL)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003003 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003004
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003005 if (!driver->setup) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003006 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
3007 return -EINVAL;
3008 }
3009
3010 WARN_ON(hsotg->driver);
3011
3012 driver->driver.bus = NULL;
3013 hsotg->driver = driver;
Alexandre Pereira da Silva7d7b2292012-06-26 11:27:10 -03003014 hsotg->gadget.dev.of_node = hsotg->dev->of_node;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003015 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3016
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003017 ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
3018 hsotg->supplies);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003019 if (ret) {
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003020 dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003021 goto err;
3022 }
3023
Lukasz Majewski12a1f4d2012-05-04 14:17:08 +02003024 hsotg->last_rst = jiffies;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003025 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
3026 return 0;
3027
3028err:
3029 hsotg->driver = NULL;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003030 return ret;
3031}
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003032
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003033/**
3034 * s3c_hsotg_udc_stop - stop the udc
3035 * @gadget: The usb gadget state
3036 * @driver: The usb gadget driver
3037 *
 3038 * Stop the udc hw block and stay tuned for future transmissions
3039 */
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003040static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
3041 struct usb_gadget_driver *driver)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003042{
Lukasz Majewskif99b2bf2012-05-04 14:17:12 +02003043 struct s3c_hsotg *hsotg = to_hsotg(gadget);
Lukasz Majewski2b19a522012-06-14 10:02:25 +02003044 unsigned long flags = 0;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003045 int ep;
3046
3047 if (!hsotg)
3048 return -ENODEV;
3049
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003050 /* all endpoints should be shutdown */
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003051 for (ep = 0; ep < hsotg->num_of_eps; ep++)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003052 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
3053
Lukasz Majewski2b19a522012-06-14 10:02:25 +02003054 spin_lock_irqsave(&hsotg->lock, flags);
3055
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003056 s3c_hsotg_phy_disable(hsotg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003057
Marek Szyprowskic8c10252013-09-12 16:18:48 +02003058 if (!driver)
3059 hsotg->driver = NULL;
3060
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003061 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003062
Lukasz Majewski2b19a522012-06-14 10:02:25 +02003063 spin_unlock_irqrestore(&hsotg->lock, flags);
3064
Marek Szyprowskic8c10252013-09-12 16:18:48 +02003065 regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003066
3067 return 0;
3068}
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003069
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003070/**
3071 * s3c_hsotg_gadget_getframe - read the frame number
3072 * @gadget: The usb gadget state
3073 *
3074 * Read the {micro} frame number
3075 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003076static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
3077{
3078 return s3c_hsotg_read_frameno(to_hsotg(gadget));
3079}
3080
Lukasz Majewskia188b682012-06-22 09:29:56 +02003081/**
3082 * s3c_hsotg_pullup - connect/disconnect the USB PHY
3083 * @gadget: The usb gadget state
 3084 * @is_on: The requested connection state (pullup on or off)
3085 *
3086 * Connect/Disconnect the USB PHY pullup
3087 */
3088static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
3089{
3090 struct s3c_hsotg *hsotg = to_hsotg(gadget);
3091 unsigned long flags = 0;
3092
 3093	dev_dbg(hsotg->dev, "%s: is_on: %d\n", __func__, is_on);
3094
3095 spin_lock_irqsave(&hsotg->lock, flags);
3096 if (is_on) {
3097 s3c_hsotg_phy_enable(hsotg);
3098 s3c_hsotg_core_init(hsotg);
3099 } else {
3100 s3c_hsotg_disconnect(hsotg);
3101 s3c_hsotg_phy_disable(hsotg);
3102 }
3103
3104 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3105 spin_unlock_irqrestore(&hsotg->lock, flags);
3106
3107 return 0;
3108}
3109
Felipe Balbieeef4582013-01-24 17:58:16 +02003110static const struct usb_gadget_ops s3c_hsotg_gadget_ops = {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003111 .get_frame = s3c_hsotg_gadget_getframe,
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003112 .udc_start = s3c_hsotg_udc_start,
3113 .udc_stop = s3c_hsotg_udc_stop,
Lukasz Majewskia188b682012-06-22 09:29:56 +02003114 .pullup = s3c_hsotg_pullup,
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003115};
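/*
 * These ops are invoked by the UDC core rather than by this driver itself:
 * udc_start/udc_stop run when a gadget driver binds to or unbinds from the
 * UDC registered with usb_add_gadget_udc() in probe, and pullup is reached
 * via usb_gadget_connect()/usb_gadget_disconnect().
 */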
3116
3117/**
3118 * s3c_hsotg_initep - initialise a single endpoint
3119 * @hsotg: The device state.
3120 * @hs_ep: The endpoint to be initialised.
3121 * @epnum: The endpoint number
3122 *
3123 * Initialise the given endpoint (as part of the probe and device state
3124 * creation) to give to the gadget driver. Setup the endpoint name, any
3125 * direction information and other state that may be required.
3126 */
Bill Pemberton41ac7b32012-11-19 13:21:48 -05003127static void s3c_hsotg_initep(struct s3c_hsotg *hsotg,
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003128 struct s3c_hsotg_ep *hs_ep,
3129 int epnum)
3130{
3131 u32 ptxfifo;
3132 char *dir;
3133
3134 if (epnum == 0)
3135 dir = "";
3136 else if ((epnum % 2) == 0) {
3137 dir = "out";
3138 } else {
3139 dir = "in";
3140 hs_ep->dir_in = 1;
3141 }
3142
3143 hs_ep->index = epnum;
3144
3145 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
3146
3147 INIT_LIST_HEAD(&hs_ep->queue);
3148 INIT_LIST_HEAD(&hs_ep->ep.ep_list);
3149
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003150 /* add to the list of endpoints known by the gadget driver */
3151 if (epnum)
3152 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
3153
3154 hs_ep->parent = hsotg;
3155 hs_ep->ep.name = hs_ep->name;
Robert Baldygae117e742013-12-13 12:23:38 +01003156 usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003157 hs_ep->ep.ops = &s3c_hsotg_ep_ops;
3158
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003159 /*
 3160	 * Read the FIFO size for the Periodic TX FIFO, even if we're
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003161	 * an OUT endpoint; we may as well do this in case the code is
 3162	 * later changed to make each endpoint's direction changeable.
3163 */
3164
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003165 ptxfifo = readl(hsotg->regs + DPTXFSIZn(epnum));
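	/* the DPTxFSize field is in 32-bit words; the * 4 converts to bytes */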
3166 hs_ep->fifo_size = DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003167
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003168 /*
3169 * if we're using dma, we need to set the next-endpoint pointer
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003170 * to be something valid.
3171 */
3172
3173 if (using_dma(hsotg)) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003174 u32 next = DxEPCTL_NextEp((epnum + 1) % 15);
3175 writel(next, hsotg->regs + DIEPCTL(epnum));
3176 writel(next, hsotg->regs + DOEPCTL(epnum));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003177 }
3178}
3179
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003180/**
3181 * s3c_hsotg_hw_cfg - read HW configuration registers
 3182 * @hsotg: The device state
3183 *
3184 * Read the USB core HW configuration registers
3185 */
3186static void s3c_hsotg_hw_cfg(struct s3c_hsotg *hsotg)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003187{
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003188 u32 cfg2, cfg4;
Ben Dooks10aebc72010-07-19 09:40:44 +01003189 /* check hardware configuration */
3190
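	/*
	 * The raw 0x48/0x50 offsets presumably correspond to the core's
	 * GHWCFG2 and GHWCFG4 registers: GHWCFG2 reports the number of
	 * device endpoints, GHWCFG4 whether dedicated TX FIFOs are fitted.
	 */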
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003191 cfg2 = readl(hsotg->regs + 0x48);
3192 hsotg->num_of_eps = (cfg2 >> 10) & 0xF;
3193
3194 dev_info(hsotg->dev, "EPs:%d\n", hsotg->num_of_eps);
3195
Ben Dooks10aebc72010-07-19 09:40:44 +01003196 cfg4 = readl(hsotg->regs + 0x50);
3197 hsotg->dedicated_fifos = (cfg4 >> 25) & 1;
3198
3199 dev_info(hsotg->dev, "%s fifos\n",
3200 hsotg->dedicated_fifos ? "dedicated" : "shared");
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003201}
3202
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003203/**
3204 * s3c_hsotg_dump - dump state of the udc
 3205 * @hsotg: The device state
3206 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003207static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
3208{
Mark Brown83a01802011-06-01 17:16:15 +01003209#ifdef DEBUG
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003210 struct device *dev = hsotg->dev;
3211 void __iomem *regs = hsotg->regs;
3212 u32 val;
3213 int idx;
3214
3215 dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003216 readl(regs + DCFG), readl(regs + DCTL),
3217 readl(regs + DIEPMSK));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003218
3219 dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003220 readl(regs + GAHBCFG), readl(regs + 0x44));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003221
3222 dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003223 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003224
3225 /* show periodic fifo settings */
3226
3227 for (idx = 1; idx <= 15; idx++) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003228 val = readl(regs + DPTXFSIZn(idx));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003229 dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003230 val >> DPTXFSIZn_DPTxFSize_SHIFT,
3231 val & DPTXFSIZn_DPTxFStAddr_MASK);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003232 }
3233
3234 for (idx = 0; idx < 15; idx++) {
3235 dev_info(dev,
3236 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003237 readl(regs + DIEPCTL(idx)),
3238 readl(regs + DIEPTSIZ(idx)),
3239 readl(regs + DIEPDMA(idx)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003240
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003241 val = readl(regs + DOEPCTL(idx));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003242 dev_info(dev,
3243 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003244 idx, readl(regs + DOEPCTL(idx)),
3245 readl(regs + DOEPTSIZ(idx)),
3246 readl(regs + DOEPDMA(idx)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003247
3248 }
3249
3250 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003251 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
Mark Brown83a01802011-06-01 17:16:15 +01003252#endif
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003253}
3254
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003255/**
3256 * state_show - debugfs: show overall driver and device state.
3257 * @seq: The seq file to write to.
3258 * @v: Unused parameter.
3259 *
3260 * This debugfs entry shows the overall state of the hardware and
3261 * some general information about each of the endpoints available
3262 * to the system.
3263 */
3264static int state_show(struct seq_file *seq, void *v)
3265{
3266 struct s3c_hsotg *hsotg = seq->private;
3267 void __iomem *regs = hsotg->regs;
3268 int idx;
3269
3270 seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003271 readl(regs + DCFG),
3272 readl(regs + DCTL),
3273 readl(regs + DSTS));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003274
3275 seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003276 readl(regs + DIEPMSK), readl(regs + DOEPMSK));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003277
3278 seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003279 readl(regs + GINTMSK),
3280 readl(regs + GINTSTS));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003281
3282 seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003283 readl(regs + DAINTMSK),
3284 readl(regs + DAINT));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003285
3286 seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003287 readl(regs + GNPTXSTS),
3288 readl(regs + GRXSTSR));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003289
Pavel Macheka023da32013-09-30 14:56:02 +02003290 seq_puts(seq, "\nEndpoint status:\n");
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003291
3292 for (idx = 0; idx < 15; idx++) {
3293 u32 in, out;
3294
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003295 in = readl(regs + DIEPCTL(idx));
3296 out = readl(regs + DOEPCTL(idx));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003297
3298 seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
3299 idx, in, out);
3300
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003301 in = readl(regs + DIEPTSIZ(idx));
3302 out = readl(regs + DOEPTSIZ(idx));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003303
3304 seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
3305 in, out);
3306
Pavel Macheka023da32013-09-30 14:56:02 +02003307 seq_puts(seq, "\n");
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003308 }
3309
3310 return 0;
3311}
3312
3313static int state_open(struct inode *inode, struct file *file)
3314{
3315 return single_open(file, state_show, inode->i_private);
3316}
3317
3318static const struct file_operations state_fops = {
3319 .owner = THIS_MODULE,
3320 .open = state_open,
3321 .read = seq_read,
3322 .llseek = seq_lseek,
3323 .release = single_release,
3324};
3325
3326/**
3327 * fifo_show - debugfs: show the fifo information
3328 * @seq: The seq_file to write data to.
3329 * @v: Unused parameter.
3330 *
3331 * Show the FIFO information for the overall fifo and all the
3332 * periodic transmission FIFOs.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003333 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003334static int fifo_show(struct seq_file *seq, void *v)
3335{
3336 struct s3c_hsotg *hsotg = seq->private;
3337 void __iomem *regs = hsotg->regs;
3338 u32 val;
3339 int idx;
3340
Pavel Macheka023da32013-09-30 14:56:02 +02003341 seq_puts(seq, "Non-periodic FIFOs:\n");
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003342 seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003343
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003344 val = readl(regs + GNPTXFSIZ);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003345 seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003346 val >> GNPTXFSIZ_NPTxFDep_SHIFT,
3347 val & GNPTXFSIZ_NPTxFStAddr_MASK);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003348
Pavel Macheka023da32013-09-30 14:56:02 +02003349 seq_puts(seq, "\nPeriodic TXFIFOs:\n");
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003350
3351 for (idx = 1; idx <= 15; idx++) {
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003352 val = readl(regs + DPTXFSIZn(idx));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003353
3354 seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003355 val >> DPTXFSIZn_DPTxFSize_SHIFT,
3356 val & DPTXFSIZn_DPTxFStAddr_MASK);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003357 }
3358
3359 return 0;
3360}
3361
3362static int fifo_open(struct inode *inode, struct file *file)
3363{
3364 return single_open(file, fifo_show, inode->i_private);
3365}
3366
3367static const struct file_operations fifo_fops = {
3368 .owner = THIS_MODULE,
3369 .open = fifo_open,
3370 .read = seq_read,
3371 .llseek = seq_lseek,
3372 .release = single_release,
3373};
3374
3375
3376static const char *decode_direction(int is_in)
3377{
3378 return is_in ? "in" : "out";
3379}
3380
3381/**
3382 * ep_show - debugfs: show the state of an endpoint.
3383 * @seq: The seq_file to write data to.
3384 * @v: Unused parameter.
3385 *
3386 * This debugfs entry shows the state of the given endpoint (one is
3387 * registered for each available).
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003388 */
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003389static int ep_show(struct seq_file *seq, void *v)
3390{
3391 struct s3c_hsotg_ep *ep = seq->private;
3392 struct s3c_hsotg *hsotg = ep->parent;
3393 struct s3c_hsotg_req *req;
3394 void __iomem *regs = hsotg->regs;
3395 int index = ep->index;
3396 int show_limit = 15;
3397 unsigned long flags;
3398
3399 seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
3400 ep->index, ep->ep.name, decode_direction(ep->dir_in));
3401
3402 /* first show the register state */
3403
3404 seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003405 readl(regs + DIEPCTL(index)),
3406 readl(regs + DOEPCTL(index)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003407
3408 seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003409 readl(regs + DIEPDMA(index)),
3410 readl(regs + DOEPDMA(index)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003411
3412 seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003413 readl(regs + DIEPINT(index)),
3414 readl(regs + DOEPINT(index)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003415
3416 seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
Lukasz Majewski94cb8fd2012-05-04 14:17:14 +02003417 readl(regs + DIEPTSIZ(index)),
3418 readl(regs + DOEPTSIZ(index)));
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003419
Pavel Macheka023da32013-09-30 14:56:02 +02003420 seq_puts(seq, "\n");
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003421 seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
3422 seq_printf(seq, "total_data=%ld\n", ep->total_data);
3423
3424 seq_printf(seq, "request list (%p,%p):\n",
3425 ep->queue.next, ep->queue.prev);
3426
Lukasz Majewski22258f42012-06-14 10:02:24 +02003427 spin_lock_irqsave(&hsotg->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003428
3429 list_for_each_entry(req, &ep->queue, queue) {
3430 if (--show_limit < 0) {
Pavel Macheka023da32013-09-30 14:56:02 +02003431 seq_puts(seq, "not showing more requests...\n");
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003432 break;
3433 }
3434
3435 seq_printf(seq, "%c req %p: %d bytes @%p, ",
3436 req == ep->req ? '*' : ' ',
3437 req, req->req.length, req->req.buf);
3438 seq_printf(seq, "%d done, res %d\n",
3439 req->req.actual, req->req.status);
3440 }
3441
Lukasz Majewski22258f42012-06-14 10:02:24 +02003442 spin_unlock_irqrestore(&hsotg->lock, flags);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003443
3444 return 0;
3445}
3446
3447static int ep_open(struct inode *inode, struct file *file)
3448{
3449 return single_open(file, ep_show, inode->i_private);
3450}
3451
3452static const struct file_operations ep_fops = {
3453 .owner = THIS_MODULE,
3454 .open = ep_open,
3455 .read = seq_read,
3456 .llseek = seq_lseek,
3457 .release = single_release,
3458};
3459
3460/**
3461 * s3c_hsotg_create_debug - create debugfs directory and files
3462 * @hsotg: The driver state
3463 *
3464 * Create the debugfs files to allow the user to get information
3465 * about the state of the system. The directory name is created
3466 * with the same name as the device itself, in case we end up
3467 * with multiple blocks in future systems.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003468 */
Bill Pemberton41ac7b32012-11-19 13:21:48 -05003469static void s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003470{
3471 struct dentry *root;
3472 unsigned epidx;
3473
3474 root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
3475 hsotg->debug_root = root;
3476 if (IS_ERR(root)) {
3477 dev_err(hsotg->dev, "cannot create debug root\n");
3478 return;
3479 }
3480
3481 /* create general state file */
3482
3483 hsotg->debug_file = debugfs_create_file("state", 0444, root,
3484 hsotg, &state_fops);
3485
3486 if (IS_ERR(hsotg->debug_file))
3487 dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
3488
3489 hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
3490 hsotg, &fifo_fops);
3491
3492 if (IS_ERR(hsotg->debug_fifo))
3493 dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
3494
3495 /* create one file for each endpoint */
3496
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003497 for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003498 struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3499
3500 ep->debugfs = debugfs_create_file(ep->name, 0444,
3501 root, ep, &ep_fops);
3502
3503 if (IS_ERR(ep->debugfs))
3504 dev_err(hsotg->dev, "failed to create %s debug file\n",
3505 ep->name);
3506 }
3507}
3508
3509/**
3510 * s3c_hsotg_delete_debug - cleanup debugfs entries
3511 * @hsotg: The driver state
3512 *
3513 * Cleanup (remove) the debugfs files for use on module exit.
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003514 */
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05003515static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003516{
3517 unsigned epidx;
3518
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003519 for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003520 struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3521 debugfs_remove(ep->debugfs);
3522 }
3523
3524 debugfs_remove(hsotg->debug_file);
3525 debugfs_remove(hsotg->debug_fifo);
3526 debugfs_remove(hsotg->debug_root);
3527}
3528
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003529/**
3530 * s3c_hsotg_probe - probe function for hsotg driver
3531 * @pdev: The platform information for the driver
3532 */
Lukasz Majewskif026a522012-05-04 14:17:13 +02003533
Bill Pemberton41ac7b32012-11-19 13:21:48 -05003534static int s3c_hsotg_probe(struct platform_device *pdev)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003535{
Jingoo Hane01ee9f2013-07-30 17:00:51 +09003536 struct s3c_hsotg_plat *plat = dev_get_platdata(&pdev->dev);
Praveen Panerib2e587d2012-11-14 15:57:16 +05303537 struct usb_phy *phy;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003538 struct device *dev = &pdev->dev;
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003539 struct s3c_hsotg_ep *eps;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003540 struct s3c_hsotg *hsotg;
3541 struct resource *res;
3542 int epnum;
3543 int ret;
Lukasz Majewskifc9a7312012-05-04 14:17:02 +02003544 int i;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003545
Sachin Kamat338edab2012-05-18 14:33:46 +05303546 hsotg = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsotg), GFP_KERNEL);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003547 if (!hsotg) {
3548 dev_err(dev, "cannot get memory\n");
3549 return -ENOMEM;
3550 }
3551
Praveen Panerib2e587d2012-11-14 15:57:16 +05303552 phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
Felipe Balbif4f5ba52013-03-15 10:56:19 +02003553 if (IS_ERR(phy)) {
Praveen Panerib2e587d2012-11-14 15:57:16 +05303554 /* Fallback for pdata */
Jingoo Hane01ee9f2013-07-30 17:00:51 +09003555 plat = dev_get_platdata(&pdev->dev);
Praveen Panerib2e587d2012-11-14 15:57:16 +05303556 if (!plat) {
3557 dev_err(&pdev->dev, "no platform data or transceiver defined\n");
3558 return -EPROBE_DEFER;
3559 } else {
3560 hsotg->plat = plat;
3561 }
3562 } else {
3563 hsotg->phy = phy;
3564 }
3565
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003566 hsotg->dev = dev;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003567
Sachin Kamat84749c62012-09-03 16:15:18 +05303568 hsotg->clk = devm_clk_get(&pdev->dev, "otg");
Marek Szyprowski31ee04d2010-07-19 16:01:42 +02003569 if (IS_ERR(hsotg->clk)) {
3570 dev_err(dev, "cannot get otg clock\n");
Sachin Kamat338edab2012-05-18 14:33:46 +05303571 return PTR_ERR(hsotg->clk);
Marek Szyprowski31ee04d2010-07-19 16:01:42 +02003572 }
3573
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003574 platform_set_drvdata(pdev, hsotg);
3575
3576 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003577
Thierry Reding148e1132013-01-21 11:09:22 +01003578 hsotg->regs = devm_ioremap_resource(&pdev->dev, res);
3579 if (IS_ERR(hsotg->regs)) {
3580 ret = PTR_ERR(hsotg->regs);
Sachin Kamat338edab2012-05-18 14:33:46 +05303581 goto err_clk;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003582 }
3583
3584 ret = platform_get_irq(pdev, 0);
3585 if (ret < 0) {
3586 dev_err(dev, "cannot find IRQ\n");
Sachin Kamat338edab2012-05-18 14:33:46 +05303587 goto err_clk;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003588 }
3589
Lukasz Majewski22258f42012-06-14 10:02:24 +02003590 spin_lock_init(&hsotg->lock);
3591
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003592 hsotg->irq = ret;
3593
Sachin Kamat338edab2012-05-18 14:33:46 +05303594 ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
3595 dev_name(dev), hsotg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003596 if (ret < 0) {
3597 dev_err(dev, "cannot claim IRQ\n");
Sachin Kamat338edab2012-05-18 14:33:46 +05303598 goto err_clk;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003599 }
3600
3601 dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
3602
Michal Nazarewiczd327ab52011-11-19 18:27:37 +01003603 hsotg->gadget.max_speed = USB_SPEED_HIGH;
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003604 hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
3605 hsotg->gadget.name = dev_name(dev);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003606
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003607 /* reset the system */
3608
Lukasz Majewski04b4a0f2012-05-04 14:17:15 +02003609 clk_prepare_enable(hsotg->clk);
Marek Szyprowski31ee04d2010-07-19 16:01:42 +02003610
Lukasz Majewskifc9a7312012-05-04 14:17:02 +02003611 /* regulators */
3612
3613 for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
3614 hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];
3615
Sachin Kamatcd762132013-01-08 14:27:00 +05303616 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
Lukasz Majewskifc9a7312012-05-04 14:17:02 +02003617 hsotg->supplies);
3618 if (ret) {
3619 dev_err(dev, "failed to request supplies: %d\n", ret);
Sachin Kamat338edab2012-05-18 14:33:46 +05303620 goto err_clk;
Lukasz Majewskifc9a7312012-05-04 14:17:02 +02003621 }
3622
3623 ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
3624 hsotg->supplies);
3625
3626 if (ret) {
3627 dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
3628 goto err_supplies;
3629 }
3630
Lukasz Majewski41188782012-05-04 14:17:01 +02003631 /* usb phy enable */
3632 s3c_hsotg_phy_enable(hsotg);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003633
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003634 s3c_hsotg_corereset(hsotg);
3635 s3c_hsotg_init(hsotg);
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003636 s3c_hsotg_hw_cfg(hsotg);
3637
3638 /* hsotg->num_of_eps holds number of EPs other than ep0 */
3639
3640 if (hsotg->num_of_eps == 0) {
3641 dev_err(dev, "wrong number of EPs (zero)\n");
Julia Lawalldfdda5a2012-08-14 08:47:34 +02003642 ret = -EINVAL;
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003643 goto err_supplies;
3644 }
3645
3646 eps = kcalloc(hsotg->num_of_eps + 1, sizeof(struct s3c_hsotg_ep),
3647 GFP_KERNEL);
3648 if (!eps) {
3649 dev_err(dev, "cannot get memory\n");
Julia Lawalldfdda5a2012-08-14 08:47:34 +02003650 ret = -ENOMEM;
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003651 goto err_supplies;
3652 }
3653
3654 hsotg->eps = eps;
3655
3656 /* setup endpoint information */
3657
3658 INIT_LIST_HEAD(&hsotg->gadget.ep_list);
3659 hsotg->gadget.ep0 = &hsotg->eps[0].ep;
3660
3661 /* allocate EP0 request */
3662
3663 hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
3664 GFP_KERNEL);
3665 if (!hsotg->ctrl_req) {
3666 dev_err(dev, "failed to allocate ctrl req\n");
Julia Lawalldfdda5a2012-08-14 08:47:34 +02003667 ret = -ENOMEM;
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003668 goto err_ep_mem;
3669 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003670
3671 /* initialise the endpoints now the core has been initialised */
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003672 for (epnum = 0; epnum < hsotg->num_of_eps; epnum++)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003673 s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
3674
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003675 /* disable power and clock */
3676
3677 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
3678 hsotg->supplies);
3679 if (ret) {
3680 dev_err(hsotg->dev, "failed to disable supplies: %d\n", ret);
3681 goto err_ep_mem;
3682 }
3683
3684 s3c_hsotg_phy_disable(hsotg);
3685
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03003686 ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
3687 if (ret)
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003688 goto err_ep_mem;
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03003689
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003690 s3c_hsotg_create_debug(hsotg);
3691
3692 s3c_hsotg_dump(hsotg);
3693
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003694 return 0;
3695
Lukasz Majewski1d144c62012-05-04 14:17:16 +02003696err_ep_mem:
Lukasz Majewskib3f489b2012-05-04 14:17:09 +02003697 kfree(eps);
Lukasz Majewskifc9a7312012-05-04 14:17:02 +02003698err_supplies:
Lukasz Majewski41188782012-05-04 14:17:01 +02003699 s3c_hsotg_phy_disable(hsotg);
Marek Szyprowski31ee04d2010-07-19 16:01:42 +02003700err_clk:
Lukasz Majewski1d144c62012-05-04 14:17:16 +02003701 clk_disable_unprepare(hsotg->clk);
Sachin Kamat338edab2012-05-18 14:33:46 +05303702
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003703 return ret;
3704}
3705
Lukasz Majewski8b9bc462012-05-04 14:17:11 +02003706/**
3707 * s3c_hsotg_remove - remove function for hsotg driver
3708 * @pdev: The platform information for the driver
3709 */
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05003710static int s3c_hsotg_remove(struct platform_device *pdev)
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003711{
3712 struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
3713
Sebastian Andrzej Siewior0f913492011-06-28 16:33:47 +03003714 usb_del_gadget_udc(&hsotg->gadget);
3715
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003716 s3c_hsotg_delete_debug(hsotg);
3717
Lukasz Majewskif65f0f12012-05-04 14:17:10 +02003718 if (hsotg->driver) {
3719 /* should have been done already by driver model core */
3720 usb_gadget_unregister_driver(hsotg->driver);
3721 }
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003722
Lukasz Majewski41188782012-05-04 14:17:01 +02003723 s3c_hsotg_phy_disable(hsotg);
Lukasz Majewski04b4a0f2012-05-04 14:17:15 +02003724 clk_disable_unprepare(hsotg->clk);
Marek Szyprowski31ee04d2010-07-19 16:01:42 +02003725
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003726 return 0;
3727}
3728
3729#if 1
3730#define s3c_hsotg_suspend NULL
3731#define s3c_hsotg_resume NULL
3732#endif
3733
Tomasz Figac50f056c2013-06-25 17:38:23 +02003734#ifdef CONFIG_OF
3735static const struct of_device_id s3c_hsotg_of_ids[] = {
3736 { .compatible = "samsung,s3c6400-hsotg", },
Matt Porter0d33d822013-12-19 09:23:05 -05003737 { .compatible = "snps,dwc2", },
Tomasz Figac50f056c2013-06-25 17:38:23 +02003738 { /* sentinel */ }
3739};
3740MODULE_DEVICE_TABLE(of, s3c_hsotg_of_ids);
3741#endif
3742
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003743static struct platform_driver s3c_hsotg_driver = {
3744 .driver = {
3745 .name = "s3c-hsotg",
3746 .owner = THIS_MODULE,
Tomasz Figac50f056c2013-06-25 17:38:23 +02003747 .of_match_table = of_match_ptr(s3c_hsotg_of_ids),
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003748 },
3749 .probe = s3c_hsotg_probe,
Bill Pemberton76904172012-11-19 13:21:08 -05003750 .remove = s3c_hsotg_remove,
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003751 .suspend = s3c_hsotg_suspend,
3752 .resume = s3c_hsotg_resume,
3753};
3754
Axel Lincc27c962011-11-27 20:16:27 +08003755module_platform_driver(s3c_hsotg_driver);
Ben Dooks5b7d70c2009-06-02 14:58:06 +01003756
3757MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
3758MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
3759MODULE_LICENSE("GPL");
3760MODULE_ALIAS("platform:s3c-hsotg");