David Mosberger2d531392014-04-28 22:14:07 -06001/*
2 * MAX3421 Host Controller driver for USB.
3 *
4 * Author: David Mosberger-Tang <davidm@egauge.net>
5 *
6 * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net>
7 *
8 * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host
9 * controller on a SPI bus.
10 *
11 * Based on:
12 * o MAX3421E datasheet
13 * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf
14 * o MAX3421E Programming Guide
15 * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf
16 * o gadget/dummy_hcd.c
17 * For USB HCD implementation.
18 * o Arduino MAX3421 driver
19 * https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp
20 *
 21 * This file is licensed under the GPL v2.
22 *
23 * Important note on worst-case (full-speed) packet size constraints
24 * (See USB 2.0 Section 5.6.3 and following):
25 *
26 * - control: 64 bytes
27 * - isochronous: 1023 bytes
28 * - interrupt: 64 bytes
29 * - bulk: 64 bytes
30 *
 31 * Since the MAX3421 FIFO size is 64 bytes, we do not have to worry about
32 * multi-FIFO writes/reads for a single USB packet *except* for isochronous
33 * transfers. We don't support isochronous transfers at this time, so we
34 * just assume that a USB packet always fits into a single FIFO buffer.
35 *
36 * NOTE: The June 2006 version of "MAX3421E Programming Guide"
37 * (AN3785) has conflicting info for the RCVDAVIRQ bit:
38 *
39 * The description of RCVDAVIRQ says "The CPU *must* clear
40 * this IRQ bit (by writing a 1 to it) before reading the
 41 * RCVFIFO data."
42 *
43 * However, the earlier section on "Programming BULK-IN
 44 * Transfers" says that:
45 *
46 * After the CPU retrieves the data, it clears the
47 * RCVDAVIRQ bit.
48 *
49 * The December 2006 version has been corrected and it consistently
50 * states the second behavior is the correct one.
51 *
52 * Synchronous SPI transactions sleep so we can't perform any such
53 * transactions while holding a spin-lock (and/or while interrupts are
 54 * masked). To ensure this, all SPI transactions are issued from a
55 * single thread (max3421_spi_thread).
56 */
57
Asaf Vertz788bfe82014-12-15 09:22:07 +020058#include <linux/jiffies.h>
David Mosberger2d531392014-04-28 22:14:07 -060059#include <linux/module.h>
60#include <linux/spi/spi.h>
61#include <linux/usb.h>
62#include <linux/usb/hcd.h>
63
64#include <linux/platform_data/max3421-hcd.h>
65
66#define DRIVER_DESC "MAX3421 USB Host-Controller Driver"
67#define DRIVER_VERSION "1.0"
68
69/* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */
70#define USB_MAX_FRAME_NUMBER 0x7ff
71#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
72
73/*
74 * Max. # of times we're willing to retransmit a request immediately in
 75 * response to a NAK. Afterwards, we fall back on trying once per frame.
76 */
77#define NAK_MAX_FAST_RETRANSMITS 2
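/*
 * (See max3421_handle_error(): the first NAK_MAX_FAST_RETRANSMITS NAKs on
 * an endpoint trigger an immediate retry via max3421_next_transfer(); any
 * further NAKs go through max3421_slow_retransmit(), i.e., once per frame.)
 */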
78
79#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
80
81/* Port-change mask: */
82#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
83 USB_PORT_STAT_C_ENABLE | \
84 USB_PORT_STAT_C_SUSPEND | \
85 USB_PORT_STAT_C_OVERCURRENT | \
86 USB_PORT_STAT_C_RESET) << 16)
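 
/*
 * For example, when max3421_detect_conn() sees a new connection it sets
 * USB_PORT_STAT_CONNECTION in the lower half of port_status and mirrors
 * every changed bit into the upper half ("chg << 16"); PORT_C_MASK is what
 * max3421_hub_status_data() tests to decide whether the root hub has a
 * port change to report.
 */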
87
88enum max3421_rh_state {
89 MAX3421_RH_RESET,
90 MAX3421_RH_SUSPENDED,
91 MAX3421_RH_RUNNING
92};
93
94enum pkt_state {
95 PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */
96 PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */
97 PKT_STATE_TERMINATE /* waiting to terminate control transfer */
98};
99
100enum scheduling_pass {
101 SCHED_PASS_PERIODIC,
102 SCHED_PASS_NON_PERIODIC,
103 SCHED_PASS_DONE
104};
105
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -0600106/* Bit numbers for max3421_hcd->todo: */
107enum {
108 ENABLE_IRQ = 0,
109 RESET_HCD,
110 RESET_PORT,
111 CHECK_UNLINK,
112 IOPIN_UPDATE
113};
114
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600115struct max3421_dma_buf {
116 u8 data[2];
117};
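 
/*
 * Each register access uses one of these two-byte buffers: byte 0 carries
 * the SPI command byte and byte 1 the register value (see spi_rd8() and
 * spi_wr8() below).  They are kmalloc'd separately so they can safely be
 * handed to the SPI core for DMA.
 */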
118
David Mosberger2d531392014-04-28 22:14:07 -0600119struct max3421_hcd {
120 spinlock_t lock;
121
122 struct task_struct *spi_thread;
123
124 struct max3421_hcd *next;
125
126 enum max3421_rh_state rh_state;
127 /* lower 16 bits contain port status, upper 16 bits the change mask: */
128 u32 port_status;
129
130 unsigned active:1;
131
132 struct list_head ep_list; /* list of EP's with work */
133
134 /*
135 * The following are owned by spi_thread (may be accessed by
 136 * SPI-thread without acquiring the HCD lock):
137 */
138 u8 rev; /* chip revision */
139 u16 frame_number;
140 /*
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600141 * kmalloc'd buffers guaranteed to be in separate (DMA)
142 * cache-lines:
143 */
144 struct max3421_dma_buf *tx;
145 struct max3421_dma_buf *rx;
146 /*
David Mosberger2d531392014-04-28 22:14:07 -0600147 * URB we're currently processing. Must not be reset to NULL
148 * unless MAX3421E chip is idle:
149 */
150 struct urb *curr_urb;
151 enum scheduling_pass sched_pass;
David Mosberger2d531392014-04-28 22:14:07 -0600152 int urb_done; /* > 0 -> no errors, < 0: errno */
153 size_t curr_len;
154 u8 hien;
155 u8 mode;
156 u8 iopins[2];
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -0600157 unsigned long todo;
David Mosberger2d531392014-04-28 22:14:07 -0600158#ifdef DEBUG
159 unsigned long err_stat[16];
160#endif
161};
162
163struct max3421_ep {
164 struct usb_host_endpoint *ep;
165 struct list_head ep_list;
166 u32 naks;
167 u16 last_active; /* frame # this ep was last active */
168 enum pkt_state pkt_state;
169 u8 retries;
170 u8 retransmit; /* packet needs retransmission */
171};
172
173static struct max3421_hcd *max3421_hcd_list;
174
175#define MAX3421_FIFO_SIZE 64
176
177#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
178#define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */
179
180/* SPI commands: */
181#define MAX3421_SPI_DIR_SHIFT 1
182#define MAX3421_SPI_REG_SHIFT 3
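 
/*
 * The first byte of every SPI transaction is a command byte of the form
 * (reg << MAX3421_SPI_REG_SHIFT) | (dir << MAX3421_SPI_DIR_SHIFT).  For
 * example, reading MAX3421_REG_HRSL (register 31) sends command byte 0xf8,
 * while writing MAX3421_REG_HCTL (register 29) sends 0xea; see spi_rd8()
 * and spi_wr8() below for how the byte is built.
 */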
183
184#define MAX3421_REG_RCVFIFO 1
185#define MAX3421_REG_SNDFIFO 2
186#define MAX3421_REG_SUDFIFO 4
187#define MAX3421_REG_RCVBC 6
188#define MAX3421_REG_SNDBC 7
189#define MAX3421_REG_USBIRQ 13
190#define MAX3421_REG_USBIEN 14
191#define MAX3421_REG_USBCTL 15
192#define MAX3421_REG_CPUCTL 16
193#define MAX3421_REG_PINCTL 17
194#define MAX3421_REG_REVISION 18
195#define MAX3421_REG_IOPINS1 20
196#define MAX3421_REG_IOPINS2 21
197#define MAX3421_REG_GPINIRQ 22
198#define MAX3421_REG_GPINIEN 23
199#define MAX3421_REG_GPINPOL 24
200#define MAX3421_REG_HIRQ 25
201#define MAX3421_REG_HIEN 26
202#define MAX3421_REG_MODE 27
203#define MAX3421_REG_PERADDR 28
204#define MAX3421_REG_HCTL 29
205#define MAX3421_REG_HXFR 30
206#define MAX3421_REG_HRSL 31
207
208enum {
209 MAX3421_USBIRQ_OSCOKIRQ_BIT = 0,
210 MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5,
211 MAX3421_USBIRQ_VBUSIRQ_BIT
212};
213
214enum {
215 MAX3421_CPUCTL_IE_BIT = 0,
216 MAX3421_CPUCTL_PULSEWID0_BIT = 6,
217 MAX3421_CPUCTL_PULSEWID1_BIT
218};
219
220enum {
221 MAX3421_USBCTL_PWRDOWN_BIT = 4,
222 MAX3421_USBCTL_CHIPRES_BIT
223};
224
225enum {
226 MAX3421_PINCTL_GPXA_BIT = 0,
227 MAX3421_PINCTL_GPXB_BIT,
228 MAX3421_PINCTL_POSINT_BIT,
229 MAX3421_PINCTL_INTLEVEL_BIT,
230 MAX3421_PINCTL_FDUPSPI_BIT,
231 MAX3421_PINCTL_EP0INAK_BIT,
232 MAX3421_PINCTL_EP2INAK_BIT,
233 MAX3421_PINCTL_EP3INAK_BIT,
234};
235
236enum {
237 MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */
238 MAX3421_HI_RWU_BIT, /* remote wakeup */
239 MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */
240 MAX3421_HI_SNDBAV_BIT, /* send buffer available */
241 MAX3421_HI_SUSDN_BIT, /* suspend operation done */
242 MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */
243 MAX3421_HI_FRAME_BIT, /* frame generator */
244 MAX3421_HI_HXFRDN_BIT, /* host transfer done */
245};
246
247enum {
248 MAX3421_HCTL_BUSRST_BIT = 0,
249 MAX3421_HCTL_FRMRST_BIT,
250 MAX3421_HCTL_SAMPLEBUS_BIT,
251 MAX3421_HCTL_SIGRSM_BIT,
252 MAX3421_HCTL_RCVTOG0_BIT,
253 MAX3421_HCTL_RCVTOG1_BIT,
254 MAX3421_HCTL_SNDTOG0_BIT,
255 MAX3421_HCTL_SNDTOG1_BIT
256};
257
258enum {
259 MAX3421_MODE_HOST_BIT = 0,
260 MAX3421_MODE_LOWSPEED_BIT,
261 MAX3421_MODE_HUBPRE_BIT,
262 MAX3421_MODE_SOFKAENAB_BIT,
263 MAX3421_MODE_SEPIRQ_BIT,
264 MAX3421_MODE_DELAYISO_BIT,
265 MAX3421_MODE_DMPULLDN_BIT,
266 MAX3421_MODE_DPPULLDN_BIT
267};
268
269enum {
270 MAX3421_HRSL_OK = 0,
271 MAX3421_HRSL_BUSY,
272 MAX3421_HRSL_BADREQ,
273 MAX3421_HRSL_UNDEF,
274 MAX3421_HRSL_NAK,
275 MAX3421_HRSL_STALL,
276 MAX3421_HRSL_TOGERR,
277 MAX3421_HRSL_WRONGPID,
278 MAX3421_HRSL_BADBC,
279 MAX3421_HRSL_PIDERR,
280 MAX3421_HRSL_PKTERR,
281 MAX3421_HRSL_CRCERR,
282 MAX3421_HRSL_KERR,
283 MAX3421_HRSL_JERR,
284 MAX3421_HRSL_TIMEOUT,
285 MAX3421_HRSL_BABBLE,
286 MAX3421_HRSL_RESULT_MASK = 0xf,
287 MAX3421_HRSL_RCVTOGRD_BIT = 4,
288 MAX3421_HRSL_SNDTOGRD_BIT,
289 MAX3421_HRSL_KSTATUS_BIT,
290 MAX3421_HRSL_JSTATUS_BIT
291};
292
293/* Return same error-codes as ohci.h:cc_to_error: */
294static const int hrsl_to_error[] = {
295 [MAX3421_HRSL_OK] = 0,
296 [MAX3421_HRSL_BUSY] = -EINVAL,
297 [MAX3421_HRSL_BADREQ] = -EINVAL,
298 [MAX3421_HRSL_UNDEF] = -EINVAL,
299 [MAX3421_HRSL_NAK] = -EAGAIN,
300 [MAX3421_HRSL_STALL] = -EPIPE,
301 [MAX3421_HRSL_TOGERR] = -EILSEQ,
302 [MAX3421_HRSL_WRONGPID] = -EPROTO,
303 [MAX3421_HRSL_BADBC] = -EREMOTEIO,
304 [MAX3421_HRSL_PIDERR] = -EPROTO,
305 [MAX3421_HRSL_PKTERR] = -EPROTO,
306 [MAX3421_HRSL_CRCERR] = -EILSEQ,
307 [MAX3421_HRSL_KERR] = -EIO,
308 [MAX3421_HRSL_JERR] = -EIO,
309 [MAX3421_HRSL_TIMEOUT] = -ETIME,
310 [MAX3421_HRSL_BABBLE] = -EOVERFLOW
311};
312
313/*
314 * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a
 315 * reasonable overview of how control transfers use the IN/OUT
316 * tokens.
317 */
318#define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */
319#define MAX3421_HXFR_SETUP 0x10
320#define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */
321#define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep))
322#define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep))
323#define MAX3421_HXFR_HS_IN 0x80 /* handshake in */
324#define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */
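 
/*
 * For example, a control-IN request (e.g., a GET_DESCRIPTOR) to endpoint 0
 * is issued as three successive HXFR writes: MAX3421_HXFR_SETUP (0x10) for
 * the setup stage, MAX3421_HXFR_BULK_IN(0) (0x00) for the data stage, and
 * MAX3421_HXFR_HS_OUT (0xa0) for the status stage; see PKT_STATE_* and
 * max3421_next_transfer().
 */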
325
326#define field(val, bit) ((val) << (bit))
327
328static inline s16
329frame_diff(u16 left, u16 right)
330{
331 return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1);
332}
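 
/*
 * frame_diff() is safe across the 11-bit wraparound of the frame counter:
 * e.g., frame_diff(0x001, 0x7ff) == 2 even though the counter wrapped in
 * between.
 */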
333
334static inline struct max3421_hcd *
335hcd_to_max3421(struct usb_hcd *hcd)
336{
337 return (struct max3421_hcd *) hcd->hcd_priv;
338}
339
340static inline struct usb_hcd *
341max3421_to_hcd(struct max3421_hcd *max3421_hcd)
342{
343 return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv);
344}
345
346static u8
347spi_rd8(struct usb_hcd *hcd, unsigned int reg)
348{
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600349 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600350 struct spi_device *spi = to_spi_device(hcd->self.controller);
351 struct spi_transfer transfer;
David Mosberger2d531392014-04-28 22:14:07 -0600352 struct spi_message msg;
353
354 memset(&transfer, 0, sizeof(transfer));
355
356 spi_message_init(&msg);
357
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600358 max3421_hcd->tx->data[0] =
359 (field(reg, MAX3421_SPI_REG_SHIFT) |
360 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
David Mosberger2d531392014-04-28 22:14:07 -0600361
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600362 transfer.tx_buf = max3421_hcd->tx->data;
363 transfer.rx_buf = max3421_hcd->rx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600364 transfer.len = 2;
365
366 spi_message_add_tail(&transfer, &msg);
367 spi_sync(spi, &msg);
368
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600369 return max3421_hcd->rx->data[1];
David Mosberger2d531392014-04-28 22:14:07 -0600370}
371
372static void
373spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val)
374{
375 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600376 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600377 struct spi_transfer transfer;
378 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600379
380 memset(&transfer, 0, sizeof(transfer));
381
382 spi_message_init(&msg);
383
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600384 max3421_hcd->tx->data[0] =
385 (field(reg, MAX3421_SPI_REG_SHIFT) |
386 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
387 max3421_hcd->tx->data[1] = val;
David Mosberger2d531392014-04-28 22:14:07 -0600388
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600389 transfer.tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600390 transfer.len = 2;
391
392 spi_message_add_tail(&transfer, &msg);
393 spi_sync(spi, &msg);
394}
395
396static void
397spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
398{
399 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600400 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600401 struct spi_transfer transfer[2];
402 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600403
404 memset(transfer, 0, sizeof(transfer));
405
406 spi_message_init(&msg);
407
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600408 max3421_hcd->tx->data[0] =
409 (field(reg, MAX3421_SPI_REG_SHIFT) |
410 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT));
411 transfer[0].tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600412 transfer[0].len = 1;
413
414 transfer[1].rx_buf = buf;
415 transfer[1].len = len;
416
417 spi_message_add_tail(&transfer[0], &msg);
418 spi_message_add_tail(&transfer[1], &msg);
419 spi_sync(spi, &msg);
420}
421
422static void
423spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len)
424{
425 struct spi_device *spi = to_spi_device(hcd->self.controller);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600426 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600427 struct spi_transfer transfer[2];
428 struct spi_message msg;
David Mosberger2d531392014-04-28 22:14:07 -0600429
430 memset(transfer, 0, sizeof(transfer));
431
432 spi_message_init(&msg);
433
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600434 max3421_hcd->tx->data[0] =
435 (field(reg, MAX3421_SPI_REG_SHIFT) |
436 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT));
David Mosberger2d531392014-04-28 22:14:07 -0600437
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -0600438 transfer[0].tx_buf = max3421_hcd->tx->data;
David Mosberger2d531392014-04-28 22:14:07 -0600439 transfer[0].len = 1;
440
441 transfer[1].tx_buf = buf;
442 transfer[1].len = len;
443
444 spi_message_add_tail(&transfer[0], &msg);
445 spi_message_add_tail(&transfer[1], &msg);
446 spi_sync(spi, &msg);
447}
448
449/*
450 * Figure out the correct setting for the LOWSPEED and HUBPRE mode
451 * bits. The HUBPRE bit needs to be set when MAX3421E operates at
452 * full speed, but it's talking to a low-speed device (i.e., through a
453 * hub). Setting that bit ensures that every low-speed packet is
454 * preceded by a full-speed PRE PID. Possible configurations:
455 *
456 * Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit:
457 * FULL FULL => 0 0
458 * FULL LOW => 1 1
459 * LOW LOW => 1 0
460 * LOW FULL => 1 0
461 */
462static void
463max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev)
464{
465 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
466 u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode;
467
468 mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT);
469 mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT);
470 if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) {
471 mode |= mode_lowspeed;
472 mode &= ~mode_hubpre;
473 } else if (dev->speed == USB_SPEED_LOW) {
474 mode |= mode_lowspeed | mode_hubpre;
475 } else {
476 mode &= ~(mode_lowspeed | mode_hubpre);
477 }
478 if (mode != max3421_hcd->mode) {
479 max3421_hcd->mode = mode;
480 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
481 }
482
483}
484
485/*
486 * Caller must NOT hold HCD spinlock.
487 */
488static void
Mark Tomlinsona573b502021-06-25 15:14:56 +1200489max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum)
David Mosberger2d531392014-04-28 22:14:07 -0600490{
Mark Tomlinsona573b502021-06-25 15:14:56 +1200491 int rcvtog, sndtog;
David Mosberger2d531392014-04-28 22:14:07 -0600492 u8 hctl;
493
David Mosberger2d531392014-04-28 22:14:07 -0600494 /* setup new endpoint's toggle bits: */
495 rcvtog = usb_gettoggle(dev, epnum, 0);
496 sndtog = usb_gettoggle(dev, epnum, 1);
497 hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) |
498 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
499
David Mosberger2d531392014-04-28 22:14:07 -0600500 spi_wr8(hcd, MAX3421_REG_HCTL, hctl);
501
502 /*
503 * Note: devnum for one and the same device can change during
504 * address-assignment so it's best to just always load the
 505 * address whenever the end-point changed or was forced.
506 */
David Mosberger2d531392014-04-28 22:14:07 -0600507 spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum);
508}
509
510static int
511max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb)
512{
513 spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8);
514 return MAX3421_HXFR_SETUP;
515}
516
517static int
518max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb)
519{
520 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
521 int epnum = usb_pipeendpoint(urb->pipe);
522
523 max3421_hcd->curr_len = 0;
524 max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT);
525 return MAX3421_HXFR_BULK_IN(epnum);
526}
527
528static int
529max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit)
530{
531 struct spi_device *spi = to_spi_device(hcd->self.controller);
532 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
533 int epnum = usb_pipeendpoint(urb->pipe);
534 u32 max_packet;
535 void *src;
536
537 src = urb->transfer_buffer + urb->actual_length;
538
539 if (fast_retransmit) {
540 if (max3421_hcd->rev == 0x12) {
541 /* work around rev 0x12 bug: */
542 spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
543 spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]);
544 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
545 }
546 return MAX3421_HXFR_BULK_OUT(epnum);
547 }
548
549 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
550
551 if (max_packet > MAX3421_FIFO_SIZE) {
552 /*
553 * We do not support isochronous transfers at this
554 * time.
555 */
556 dev_err(&spi->dev,
557 "%s: packet-size of %u too big (limit is %u bytes)",
558 __func__, max_packet, MAX3421_FIFO_SIZE);
559 max3421_hcd->urb_done = -EMSGSIZE;
560 return -EMSGSIZE;
561 }
562 max3421_hcd->curr_len = min((urb->transfer_buffer_length -
563 urb->actual_length), max_packet);
564
565 spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len);
566 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len);
567 return MAX3421_HXFR_BULK_OUT(epnum);
568}
569
570/*
571 * Issue the next host-transfer command.
572 * Caller must NOT hold HCD spinlock.
573 */
574static void
575max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit)
576{
577 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
578 struct urb *urb = max3421_hcd->curr_urb;
David Mosberger-Tangf9da25c2014-05-28 10:06:24 -0600579 struct max3421_ep *max3421_ep;
David Mosberger2d531392014-04-28 22:14:07 -0600580 int cmd = -EINVAL;
581
582 if (!urb)
583 return; /* nothing to do */
584
David Mosberger-Tangf9da25c2014-05-28 10:06:24 -0600585 max3421_ep = urb->ep->hcpriv;
586
David Mosberger2d531392014-04-28 22:14:07 -0600587 switch (max3421_ep->pkt_state) {
588 case PKT_STATE_SETUP:
589 cmd = max3421_ctrl_setup(hcd, urb);
590 break;
591
592 case PKT_STATE_TRANSFER:
593 if (usb_urb_dir_in(urb))
594 cmd = max3421_transfer_in(hcd, urb);
595 else
596 cmd = max3421_transfer_out(hcd, urb, fast_retransmit);
597 break;
598
599 case PKT_STATE_TERMINATE:
600 /*
601 * IN transfers are terminated with HS_OUT token,
602 * OUT transfers with HS_IN:
603 */
604 if (usb_urb_dir_in(urb))
605 cmd = MAX3421_HXFR_HS_OUT;
606 else
607 cmd = MAX3421_HXFR_HS_IN;
608 break;
609 }
610
611 if (cmd < 0)
612 return;
613
614 /* issue the command and wait for host-xfer-done interrupt: */
615
616 spi_wr8(hcd, MAX3421_REG_HXFR, cmd);
617 max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT);
618}
619
620/*
621 * Find the next URB to process and start its execution.
622 *
623 * At this time, we do not anticipate ever connecting a USB hub to the
 624 * MAX3421 chip, so at most one USB device can be connected and we can use
625 * a simplistic scheduler: at the start of a frame, schedule all
626 * periodic transfers. Once that is done, use the remainder of the
627 * frame to process non-periodic (bulk & control) transfers.
628 *
629 * Preconditions:
630 * o Caller must NOT hold HCD spinlock.
631 * o max3421_hcd->curr_urb MUST BE NULL.
632 * o MAX3421E chip must be idle.
633 */
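/*
 * Concretely: the FRAME interrupt in max3421_handle_irqs() resets
 * sched_pass to SCHED_PASS_PERIODIC, so within each 1 ms frame the loop
 * below first services interrupt/isochronous endpoints and only then moves
 * on to SCHED_PASS_NON_PERIODIC for control and bulk work.
 */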
634static int
635max3421_select_and_start_urb(struct usb_hcd *hcd)
636{
637 struct spi_device *spi = to_spi_device(hcd->self.controller);
638 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
639 struct urb *urb, *curr_urb = NULL;
640 struct max3421_ep *max3421_ep;
Mark Tomlinsona573b502021-06-25 15:14:56 +1200641 int epnum;
David Mosberger2d531392014-04-28 22:14:07 -0600642 struct usb_host_endpoint *ep;
643 struct list_head *pos;
644 unsigned long flags;
645
646 spin_lock_irqsave(&max3421_hcd->lock, flags);
647
648 for (;
649 max3421_hcd->sched_pass < SCHED_PASS_DONE;
650 ++max3421_hcd->sched_pass)
651 list_for_each(pos, &max3421_hcd->ep_list) {
652 urb = NULL;
653 max3421_ep = container_of(pos, struct max3421_ep,
654 ep_list);
655 ep = max3421_ep->ep;
656
657 switch (usb_endpoint_type(&ep->desc)) {
658 case USB_ENDPOINT_XFER_ISOC:
659 case USB_ENDPOINT_XFER_INT:
660 if (max3421_hcd->sched_pass !=
661 SCHED_PASS_PERIODIC)
662 continue;
663 break;
664
665 case USB_ENDPOINT_XFER_CONTROL:
666 case USB_ENDPOINT_XFER_BULK:
667 if (max3421_hcd->sched_pass !=
668 SCHED_PASS_NON_PERIODIC)
669 continue;
670 break;
671 }
672
673 if (list_empty(&ep->urb_list))
674 continue; /* nothing to do */
675 urb = list_first_entry(&ep->urb_list, struct urb,
676 urb_list);
677 if (urb->unlinked) {
678 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
679 __func__, urb, urb->unlinked);
680 max3421_hcd->curr_urb = urb;
681 max3421_hcd->urb_done = 1;
682 spin_unlock_irqrestore(&max3421_hcd->lock,
683 flags);
684 return 1;
685 }
686
687 switch (usb_endpoint_type(&ep->desc)) {
688 case USB_ENDPOINT_XFER_CONTROL:
689 /*
690 * Allow one control transaction per
691 * frame per endpoint:
692 */
693 if (frame_diff(max3421_ep->last_active,
694 max3421_hcd->frame_number) == 0)
695 continue;
696 break;
697
698 case USB_ENDPOINT_XFER_BULK:
699 if (max3421_ep->retransmit
700 && (frame_diff(max3421_ep->last_active,
701 max3421_hcd->frame_number)
702 == 0))
703 /*
704 * We already tried this EP
705 * during this frame and got a
706 * NAK or error; wait for next frame
707 */
708 continue;
709 break;
710
711 case USB_ENDPOINT_XFER_ISOC:
712 case USB_ENDPOINT_XFER_INT:
713 if (frame_diff(max3421_hcd->frame_number,
714 max3421_ep->last_active)
715 < urb->interval)
716 /*
717 * We already processed this
718 * end-point in the current
719 * frame
720 */
721 continue;
722 break;
723 }
724
725 /* move current ep to tail: */
726 list_move_tail(pos, &max3421_hcd->ep_list);
727 curr_urb = urb;
728 goto done;
729 }
730done:
731 if (!curr_urb) {
732 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
733 return 0;
734 }
735
736 urb = max3421_hcd->curr_urb = curr_urb;
737 epnum = usb_endpoint_num(&urb->ep->desc);
738 if (max3421_ep->retransmit)
739 /* restart (part of) a USB transaction: */
740 max3421_ep->retransmit = 0;
741 else {
742 /* start USB transaction: */
743 if (usb_endpoint_xfer_control(&ep->desc)) {
744 /*
745 * See USB 2.0 spec section 8.6.1
746 * Initialization via SETUP Token:
747 */
748 usb_settoggle(urb->dev, epnum, 0, 1);
749 usb_settoggle(urb->dev, epnum, 1, 1);
750 max3421_ep->pkt_state = PKT_STATE_SETUP;
David Mosberger2d531392014-04-28 22:14:07 -0600751 } else
752 max3421_ep->pkt_state = PKT_STATE_TRANSFER;
753 }
754
755 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
756
757 max3421_ep->last_active = max3421_hcd->frame_number;
Mark Tomlinsona573b502021-06-25 15:14:56 +1200758 max3421_set_address(hcd, urb->dev, epnum);
David Mosberger2d531392014-04-28 22:14:07 -0600759 max3421_set_speed(hcd, urb->dev);
760 max3421_next_transfer(hcd, 0);
761 return 1;
762}
763
764/*
765 * Check all endpoints for URBs that got unlinked.
766 *
767 * Caller must NOT hold HCD spinlock.
768 */
769static int
770max3421_check_unlink(struct usb_hcd *hcd)
771{
772 struct spi_device *spi = to_spi_device(hcd->self.controller);
773 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
David Mosberger2d531392014-04-28 22:14:07 -0600774 struct max3421_ep *max3421_ep;
775 struct usb_host_endpoint *ep;
Geliang Tang553c2362015-12-20 00:11:50 +0800776 struct urb *urb, *next;
David Mosberger2d531392014-04-28 22:14:07 -0600777 unsigned long flags;
778 int retval = 0;
779
780 spin_lock_irqsave(&max3421_hcd->lock, flags);
Geliang Tang553c2362015-12-20 00:11:50 +0800781 list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
David Mosberger2d531392014-04-28 22:14:07 -0600782 ep = max3421_ep->ep;
Geliang Tang553c2362015-12-20 00:11:50 +0800783 list_for_each_entry_safe(urb, next, &ep->urb_list, urb_list) {
David Mosberger2d531392014-04-28 22:14:07 -0600784 if (urb->unlinked) {
785 retval = 1;
786 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
787 __func__, urb, urb->unlinked);
788 usb_hcd_unlink_urb_from_ep(hcd, urb);
789 spin_unlock_irqrestore(&max3421_hcd->lock,
790 flags);
791 usb_hcd_giveback_urb(hcd, urb, 0);
792 spin_lock_irqsave(&max3421_hcd->lock, flags);
793 }
794 }
795 }
796 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
797 return retval;
798}
799
800/*
801 * Caller must NOT hold HCD spinlock.
802 */
803static void
804max3421_slow_retransmit(struct usb_hcd *hcd)
805{
806 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
807 struct urb *urb = max3421_hcd->curr_urb;
808 struct max3421_ep *max3421_ep;
809
810 max3421_ep = urb->ep->hcpriv;
811 max3421_ep->retransmit = 1;
812 max3421_hcd->curr_urb = NULL;
813}
814
815/*
816 * Caller must NOT hold HCD spinlock.
817 */
818static void
819max3421_recv_data_available(struct usb_hcd *hcd)
820{
821 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
822 struct urb *urb = max3421_hcd->curr_urb;
823 size_t remaining, transfer_size;
824 u8 rcvbc;
825
826 rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC);
827
828 if (rcvbc > MAX3421_FIFO_SIZE)
829 rcvbc = MAX3421_FIFO_SIZE;
830 if (urb->actual_length >= urb->transfer_buffer_length)
831 remaining = 0;
832 else
833 remaining = urb->transfer_buffer_length - urb->actual_length;
834 transfer_size = rcvbc;
835 if (transfer_size > remaining)
836 transfer_size = remaining;
837 if (transfer_size > 0) {
838 void *dst = urb->transfer_buffer + urb->actual_length;
839
840 spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size);
841 urb->actual_length += transfer_size;
842 max3421_hcd->curr_len = transfer_size;
843 }
844
845 /* ack the RCVDAV irq now that the FIFO has been read: */
846 spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT));
847}
848
849static void
850max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
851{
852 struct spi_device *spi = to_spi_device(hcd->self.controller);
853 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
854 u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
855 struct urb *urb = max3421_hcd->curr_urb;
856 struct max3421_ep *max3421_ep = urb->ep->hcpriv;
857 int switch_sndfifo;
858
859 /*
860 * If an OUT command results in any response other than OK
861 * (i.e., error or NAK), we have to perform a dummy-write to
862 * SNDBC so the FIFO gets switched back to us. Otherwise, we
863 * get out of sync with the SNDFIFO double buffer.
864 */
865 switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER &&
866 usb_urb_dir_out(urb));
867
868 switch (result_code) {
869 case MAX3421_HRSL_OK:
870 return; /* this shouldn't happen */
871
872 case MAX3421_HRSL_WRONGPID: /* received wrong PID */
873 case MAX3421_HRSL_BUSY: /* SIE busy */
874 case MAX3421_HRSL_BADREQ: /* bad val in HXFR */
875 case MAX3421_HRSL_UNDEF: /* reserved */
876 case MAX3421_HRSL_KERR: /* K-state instead of response */
877 case MAX3421_HRSL_JERR: /* J-state instead of response */
878 /*
879 * packet experienced an error that we cannot recover
880 * from; report error
881 */
882 max3421_hcd->urb_done = hrsl_to_error[result_code];
883 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
884 __func__, hrsl);
885 break;
886
887 case MAX3421_HRSL_TOGERR:
888 if (usb_urb_dir_in(urb))
889 ; /* don't do anything (device will switch toggle) */
890 else {
891 /* flip the send toggle bit: */
892 int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
893
894 sndtog ^= 1;
895 spi_wr8(hcd, MAX3421_REG_HCTL,
896 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT));
897 }
898 /* FALL THROUGH */
899 case MAX3421_HRSL_BADBC: /* bad byte count */
900 case MAX3421_HRSL_PIDERR: /* received PID is corrupted */
901 case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */
902 case MAX3421_HRSL_CRCERR: /* CRC error */
903 case MAX3421_HRSL_BABBLE: /* device talked too long */
904 case MAX3421_HRSL_TIMEOUT:
905 if (max3421_ep->retries++ < USB_MAX_RETRIES)
906 /* retry the packet again in the next frame */
907 max3421_slow_retransmit(hcd);
908 else {
909 /* Based on ohci.h cc_to_err[]: */
910 max3421_hcd->urb_done = hrsl_to_error[result_code];
911 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
912 __func__, hrsl);
913 }
914 break;
915
916 case MAX3421_HRSL_STALL:
917 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x",
918 __func__, hrsl);
919 max3421_hcd->urb_done = hrsl_to_error[result_code];
920 break;
921
922 case MAX3421_HRSL_NAK:
923 /*
924 * Device wasn't ready for data or has no data
925 * available: retry the packet again.
926 */
927 if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
928 max3421_next_transfer(hcd, 1);
929 switch_sndfifo = 0;
930 } else
931 max3421_slow_retransmit(hcd);
932 break;
933 }
934 if (switch_sndfifo)
935 spi_wr8(hcd, MAX3421_REG_SNDBC, 0);
936}
937
938/*
939 * Caller must NOT hold HCD spinlock.
940 */
941static int
942max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb)
943{
944 struct spi_device *spi = to_spi_device(hcd->self.controller);
945 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
946 u32 max_packet;
947
948 if (urb->actual_length >= urb->transfer_buffer_length)
949 return 1; /* read is complete, so we're done */
950
951 /*
952 * USB 2.0 Section 5.3.2 Pipes: packets must be full size
953 * except for last one.
954 */
955 max_packet = usb_maxpacket(urb->dev, urb->pipe, 0);
956 if (max_packet > MAX3421_FIFO_SIZE) {
957 /*
958 * We do not support isochronous transfers at this
959 * time...
960 */
961 dev_err(&spi->dev,
962 "%s: packet-size of %u too big (limit is %u bytes)",
963 __func__, max_packet, MAX3421_FIFO_SIZE);
964 return -EINVAL;
965 }
966
967 if (max3421_hcd->curr_len < max_packet) {
968 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
969 /*
970 * remaining > 0 and received an
971 * unexpected partial packet ->
972 * error
973 */
974 return -EREMOTEIO;
975 } else
976 /* short read, but it's OK */
977 return 1;
978 }
979 return 0; /* not done */
980}
981
982/*
983 * Caller must NOT hold HCD spinlock.
984 */
985static int
986max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb)
987{
988 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
989
990 urb->actual_length += max3421_hcd->curr_len;
991 if (urb->actual_length < urb->transfer_buffer_length)
992 return 0;
993 if (urb->transfer_flags & URB_ZERO_PACKET) {
994 /*
995 * Some hardware needs a zero-size packet at the end
996 * of a bulk-out transfer if the last transfer was a
 997 * full-sized packet (i.e., such hardware uses a packet
 998 * shorter than max_packet as an indicator that the end of
 999 * the transfer has been reached).
1000 */
1001 u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1);
1002
1003 if (max3421_hcd->curr_len == max_packet)
1004 return 0;
1005 }
1006 return 1;
1007}
1008
1009/*
1010 * Caller must NOT hold HCD spinlock.
1011 */
1012static void
1013max3421_host_transfer_done(struct usb_hcd *hcd)
1014{
1015 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1016 struct urb *urb = max3421_hcd->curr_urb;
1017 struct max3421_ep *max3421_ep;
1018 u8 result_code, hrsl;
1019 int urb_done = 0;
1020
1021 max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
1022 BIT(MAX3421_HI_RCVDAV_BIT));
1023
1024 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
1025 result_code = hrsl & MAX3421_HRSL_RESULT_MASK;
1026
1027#ifdef DEBUG
1028 ++max3421_hcd->err_stat[result_code];
1029#endif
1030
1031 max3421_ep = urb->ep->hcpriv;
1032
1033 if (unlikely(result_code != MAX3421_HRSL_OK)) {
1034 max3421_handle_error(hcd, hrsl);
1035 return;
1036 }
1037
1038 max3421_ep->naks = 0;
1039 max3421_ep->retries = 0;
1040 switch (max3421_ep->pkt_state) {
1041
1042 case PKT_STATE_SETUP:
1043 if (urb->transfer_buffer_length > 0)
1044 max3421_ep->pkt_state = PKT_STATE_TRANSFER;
1045 else
1046 max3421_ep->pkt_state = PKT_STATE_TERMINATE;
1047 break;
1048
1049 case PKT_STATE_TRANSFER:
1050 if (usb_urb_dir_in(urb))
1051 urb_done = max3421_transfer_in_done(hcd, urb);
1052 else
1053 urb_done = max3421_transfer_out_done(hcd, urb);
1054 if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) {
1055 /*
1056 * We aren't really done - we still need to
1057 * terminate the control transfer:
1058 */
1059 max3421_hcd->urb_done = urb_done = 0;
1060 max3421_ep->pkt_state = PKT_STATE_TERMINATE;
1061 }
1062 break;
1063
1064 case PKT_STATE_TERMINATE:
1065 urb_done = 1;
1066 break;
1067 }
1068
1069 if (urb_done)
1070 max3421_hcd->urb_done = urb_done;
1071 else
1072 max3421_next_transfer(hcd, 0);
1073}
1074
1075/*
1076 * Caller must NOT hold HCD spinlock.
1077 */
1078static void
1079max3421_detect_conn(struct usb_hcd *hcd)
1080{
1081 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1082 unsigned int jk, have_conn = 0;
1083 u32 old_port_status, chg;
1084 unsigned long flags;
1085 u8 hrsl, mode;
1086
1087 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
1088
1089 jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) |
1090 (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1));
1091
1092 mode = max3421_hcd->mode;
1093
1094 switch (jk) {
1095 case 0x0: /* SE0: disconnect */
1096 /*
 1097 * Turn off the SOFKAENAB bit to avoid getting an interrupt
 1098 * every millisecond:
1099 */
1100 mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT);
1101 break;
1102
1103 case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */
1104 case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */
1105 if (jk == 0x2)
1106 /* need to switch to the other speed: */
1107 mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT);
1108 /* turn on SOFKAENAB bit: */
1109 mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT);
1110 have_conn = 1;
1111 break;
1112
1113 case 0x3: /* illegal */
1114 break;
1115 }
1116
1117 max3421_hcd->mode = mode;
1118 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
1119
1120 spin_lock_irqsave(&max3421_hcd->lock, flags);
1121 old_port_status = max3421_hcd->port_status;
1122 if (have_conn)
1123 max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION;
1124 else
1125 max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION;
1126 if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT))
1127 max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED;
1128 else
1129 max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED;
1130 chg = (old_port_status ^ max3421_hcd->port_status);
1131 max3421_hcd->port_status |= chg << 16;
1132 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1133}
1134
1135static irqreturn_t
1136max3421_irq_handler(int irq, void *dev_id)
1137{
1138 struct usb_hcd *hcd = dev_id;
1139 struct spi_device *spi = to_spi_device(hcd->self.controller);
1140 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1141
1142 if (max3421_hcd->spi_thread &&
1143 max3421_hcd->spi_thread->state != TASK_RUNNING)
1144 wake_up_process(max3421_hcd->spi_thread);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001145 if (!test_and_set_bit(ENABLE_IRQ, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001146 disable_irq_nosync(spi->irq);
David Mosberger2d531392014-04-28 22:14:07 -06001147 return IRQ_HANDLED;
1148}
1149
1150#ifdef DEBUG
1151
1152static void
1153dump_eps(struct usb_hcd *hcd)
1154{
1155 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1156 struct max3421_ep *max3421_ep;
1157 struct usb_host_endpoint *ep;
David Mosberger2d531392014-04-28 22:14:07 -06001158 char ubuf[512], *dp, *end;
1159 unsigned long flags;
1160 struct urb *urb;
1161 int epnum, ret;
1162
1163 spin_lock_irqsave(&max3421_hcd->lock, flags);
Geliang Tang553c2362015-12-20 00:11:50 +08001164 list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
David Mosberger2d531392014-04-28 22:14:07 -06001165 ep = max3421_ep->ep;
1166
1167 dp = ubuf;
1168 end = dp + sizeof(ubuf);
1169 *dp = '\0';
Geliang Tang553c2362015-12-20 00:11:50 +08001170 list_for_each_entry(urb, &ep->urb_list, urb_list) {
David Mosberger2d531392014-04-28 22:14:07 -06001171 ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
1172 usb_pipetype(urb->pipe),
1173 usb_urb_dir_in(urb) ? "IN" : "OUT",
1174 urb->actual_length,
1175 urb->transfer_buffer_length);
1176 if (ret < 0 || ret >= end - dp)
1177 break; /* error or buffer full */
1178 dp += ret;
1179 }
1180
1181 epnum = usb_endpoint_num(&ep->desc);
1182 pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n",
1183 epnum, max3421_ep->pkt_state, max3421_ep->last_active,
1184 max3421_ep->retries, max3421_ep->naks,
1185 max3421_ep->retransmit, ubuf);
1186 }
1187 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1188}
1189
1190#endif /* DEBUG */
1191
1192/* Return zero if no work was performed, 1 otherwise. */
1193static int
1194max3421_handle_irqs(struct usb_hcd *hcd)
1195{
1196 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1197 u32 chg, old_port_status;
1198 unsigned long flags;
1199 u8 hirq;
1200
1201 /*
1202 * Read and ack pending interrupts (CPU must never
1203 * clear SNDBAV directly and RCVDAV must be cleared by
1204 * max3421_recv_data_available()!):
1205 */
1206 hirq = spi_rd8(hcd, MAX3421_REG_HIRQ);
1207 hirq &= max3421_hcd->hien;
1208 if (!hirq)
1209 return 0;
1210
1211 spi_wr8(hcd, MAX3421_REG_HIRQ,
1212 hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) |
1213 BIT(MAX3421_HI_RCVDAV_BIT)));
1214
1215 if (hirq & BIT(MAX3421_HI_FRAME_BIT)) {
1216 max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1)
1217 & USB_MAX_FRAME_NUMBER);
1218 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
1219 }
1220
1221 if (hirq & BIT(MAX3421_HI_RCVDAV_BIT))
1222 max3421_recv_data_available(hcd);
1223
1224 if (hirq & BIT(MAX3421_HI_HXFRDN_BIT))
1225 max3421_host_transfer_done(hcd);
1226
1227 if (hirq & BIT(MAX3421_HI_CONDET_BIT))
1228 max3421_detect_conn(hcd);
1229
1230 /*
1231 * Now process interrupts that may affect HCD state
1232 * other than the end-points:
1233 */
1234 spin_lock_irqsave(&max3421_hcd->lock, flags);
1235
1236 old_port_status = max3421_hcd->port_status;
1237 if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) {
1238 if (max3421_hcd->port_status & USB_PORT_STAT_RESET) {
1239 /* BUSEVENT due to completion of Bus Reset */
1240 max3421_hcd->port_status &= ~USB_PORT_STAT_RESET;
1241 max3421_hcd->port_status |= USB_PORT_STAT_ENABLE;
1242 } else {
1243 /* BUSEVENT due to completion of Bus Resume */
1244 pr_info("%s: BUSEVENT Bus Resume Done\n", __func__);
1245 }
1246 }
1247 if (hirq & BIT(MAX3421_HI_RWU_BIT))
1248 pr_info("%s: RWU\n", __func__);
1249 if (hirq & BIT(MAX3421_HI_SUSDN_BIT))
1250 pr_info("%s: SUSDN\n", __func__);
1251
1252 chg = (old_port_status ^ max3421_hcd->port_status);
1253 max3421_hcd->port_status |= chg << 16;
1254
1255 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1256
1257#ifdef DEBUG
1258 {
1259 static unsigned long last_time;
1260 char sbuf[16 * 16], *dp, *end;
1261 int i;
1262
Asaf Vertz788bfe82014-12-15 09:22:07 +02001263 if (time_after(jiffies, last_time + 5*HZ)) {
David Mosberger2d531392014-04-28 22:14:07 -06001264 dp = sbuf;
1265 end = sbuf + sizeof(sbuf);
1266 *dp = '\0';
1267 for (i = 0; i < 16; ++i) {
1268 int ret = snprintf(dp, end - dp, " %lu",
1269 max3421_hcd->err_stat[i]);
1270 if (ret < 0 || ret >= end - dp)
1271 break; /* error or buffer full */
1272 dp += ret;
1273 }
1274 pr_info("%s: hrsl_stats %s\n", __func__, sbuf);
1275 memset(max3421_hcd->err_stat, 0,
1276 sizeof(max3421_hcd->err_stat));
1277 last_time = jiffies;
1278
1279 dump_eps(hcd);
1280 }
1281 }
1282#endif
1283 return 1;
1284}
1285
1286static int
1287max3421_reset_hcd(struct usb_hcd *hcd)
1288{
1289 struct spi_device *spi = to_spi_device(hcd->self.controller);
1290 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1291 int timeout;
1292
 1293 /* perform a chip reset and wait for OSCOKIRQ signal to appear: */
1294 spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT));
1295 /* clear reset: */
1296 spi_wr8(hcd, MAX3421_REG_USBCTL, 0);
1297 timeout = 1000;
1298 while (1) {
1299 if (spi_rd8(hcd, MAX3421_REG_USBIRQ)
1300 & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT))
1301 break;
1302 if (--timeout < 0) {
1303 dev_err(&spi->dev,
1304 "timed out waiting for oscillator OK signal");
1305 return 1;
1306 }
1307 cond_resched();
1308 }
1309
1310 /*
1311 * Turn on host mode, automatic generation of SOF packets, and
 1312 * enable pull-down resistors on DM/DP:
1313 */
1314 max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) |
1315 BIT(MAX3421_MODE_SOFKAENAB_BIT) |
1316 BIT(MAX3421_MODE_DMPULLDN_BIT) |
1317 BIT(MAX3421_MODE_DPPULLDN_BIT));
1318 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode);
1319
1320 /* reset frame-number: */
1321 max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER;
1322 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT));
1323
1324 /* sample the state of the D+ and D- lines */
1325 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT));
1326 max3421_detect_conn(hcd);
1327
1328 /* enable frame, connection-detected, and bus-event interrupts: */
1329 max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) |
1330 BIT(MAX3421_HI_CONDET_BIT) |
1331 BIT(MAX3421_HI_BUSEVENT_BIT));
1332 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
1333
1334 /* enable interrupts: */
1335 spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT));
1336 return 1;
1337}
1338
1339static int
1340max3421_urb_done(struct usb_hcd *hcd)
1341{
1342 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1343 unsigned long flags;
1344 struct urb *urb;
1345 int status;
1346
1347 status = max3421_hcd->urb_done;
1348 max3421_hcd->urb_done = 0;
1349 if (status > 0)
1350 status = 0;
1351 urb = max3421_hcd->curr_urb;
1352 if (urb) {
 1353 /* save the old end-point's toggles: */
1354 u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL);
1355 int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1;
1356 int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1;
1357 int epnum = usb_endpoint_num(&urb->ep->desc);
1358
1359 /* no locking: HCD (i.e., we) own toggles, don't we? */
1360 usb_settoggle(urb->dev, epnum, 0, rcvtog);
1361 usb_settoggle(urb->dev, epnum, 1, sndtog);
1362
David Mosberger2d531392014-04-28 22:14:07 -06001363 max3421_hcd->curr_urb = NULL;
1364 spin_lock_irqsave(&max3421_hcd->lock, flags);
1365 usb_hcd_unlink_urb_from_ep(hcd, urb);
1366 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1367
1368 /* must be called without the HCD spinlock: */
1369 usb_hcd_giveback_urb(hcd, urb, status);
1370 }
1371 return 1;
1372}
1373
1374static int
1375max3421_spi_thread(void *dev_id)
1376{
1377 struct usb_hcd *hcd = dev_id;
1378 struct spi_device *spi = to_spi_device(hcd->self.controller);
1379 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1380 int i, i_worked = 1;
1381
1382 /* set full-duplex SPI mode, low-active interrupt pin: */
1383 spi_wr8(hcd, MAX3421_REG_PINCTL,
1384 (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */
1385 BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */
1386
1387 while (!kthread_should_stop()) {
1388 max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION);
1389 if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13)
1390 break;
1391 dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev);
1392 msleep(10000);
1393 }
1394 dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n",
1395 max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word,
1396 spi->irq);
1397
1398 while (!kthread_should_stop()) {
1399 if (!i_worked) {
1400 /*
1401 * We'll be waiting for wakeups from the hard
1402 * interrupt handler, so now is a good time to
1403 * sync our hien with the chip:
1404 */
1405 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien);
1406
1407 set_current_state(TASK_INTERRUPTIBLE);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001408 if (test_and_clear_bit(ENABLE_IRQ, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001409 enable_irq(spi->irq);
David Mosberger2d531392014-04-28 22:14:07 -06001410 schedule();
1411 __set_current_state(TASK_RUNNING);
1412 }
1413
1414 i_worked = 0;
1415
1416 if (max3421_hcd->urb_done)
1417 i_worked |= max3421_urb_done(hcd);
1418 else if (max3421_handle_irqs(hcd))
1419 i_worked = 1;
1420 else if (!max3421_hcd->curr_urb)
1421 i_worked |= max3421_select_and_start_urb(hcd);
1422
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001423 if (test_and_clear_bit(RESET_HCD, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001424 /* reset the HCD: */
David Mosberger2d531392014-04-28 22:14:07 -06001425 i_worked |= max3421_reset_hcd(hcd);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001426 if (test_and_clear_bit(RESET_PORT, &max3421_hcd->todo)) {
David Mosberger2d531392014-04-28 22:14:07 -06001427 /* perform a USB bus reset: */
David Mosberger2d531392014-04-28 22:14:07 -06001428 spi_wr8(hcd, MAX3421_REG_HCTL,
1429 BIT(MAX3421_HCTL_BUSRST_BIT));
1430 i_worked = 1;
1431 }
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001432 if (test_and_clear_bit(CHECK_UNLINK, &max3421_hcd->todo))
David Mosberger2d531392014-04-28 22:14:07 -06001433 i_worked |= max3421_check_unlink(hcd);
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001434 if (test_and_clear_bit(IOPIN_UPDATE, &max3421_hcd->todo)) {
David Mosberger2d531392014-04-28 22:14:07 -06001435 /*
1436 * IOPINS1/IOPINS2 do not auto-increment, so we can't
1437 * use spi_wr_buf().
1438 */
1439 for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
1440 u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
1441
1442 val = ((val & 0xf0) |
1443 (max3421_hcd->iopins[i] & 0x0f));
1444 spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val);
1445 max3421_hcd->iopins[i] = val;
1446 }
David Mosberger2d531392014-04-28 22:14:07 -06001447 i_worked = 1;
1448 }
1449 }
1450 set_current_state(TASK_RUNNING);
1451 dev_info(&spi->dev, "SPI thread exiting");
1452 return 0;
1453}
1454
1455static int
1456max3421_reset_port(struct usb_hcd *hcd)
1457{
1458 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1459
1460 max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE |
1461 USB_PORT_STAT_LOW_SPEED);
David Mosberger-Tanga2b63cb2014-06-19 12:57:28 -06001462 max3421_hcd->port_status |= USB_PORT_STAT_RESET;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001463 set_bit(RESET_PORT, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001464 wake_up_process(max3421_hcd->spi_thread);
1465 return 0;
1466}
1467
1468static int
1469max3421_reset(struct usb_hcd *hcd)
1470{
1471 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1472
1473 hcd->self.sg_tablesize = 0;
1474 hcd->speed = HCD_USB2;
1475 hcd->self.root_hub->speed = USB_SPEED_FULL;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001476 set_bit(RESET_HCD, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001477 wake_up_process(max3421_hcd->spi_thread);
1478 return 0;
1479}
1480
1481static int
1482max3421_start(struct usb_hcd *hcd)
1483{
1484 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1485
1486 spin_lock_init(&max3421_hcd->lock);
1487 max3421_hcd->rh_state = MAX3421_RH_RUNNING;
1488
1489 INIT_LIST_HEAD(&max3421_hcd->ep_list);
1490
1491 hcd->power_budget = POWER_BUDGET;
1492 hcd->state = HC_STATE_RUNNING;
1493 hcd->uses_new_polling = 1;
1494 return 0;
1495}
1496
1497static void
1498max3421_stop(struct usb_hcd *hcd)
1499{
1500}
1501
1502static int
1503max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1504{
1505 struct spi_device *spi = to_spi_device(hcd->self.controller);
1506 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1507 struct max3421_ep *max3421_ep;
1508 unsigned long flags;
1509 int retval;
1510
1511 switch (usb_pipetype(urb->pipe)) {
1512 case PIPE_INTERRUPT:
1513 case PIPE_ISOCHRONOUS:
1514 if (urb->interval < 0) {
1515 dev_err(&spi->dev,
1516 "%s: interval=%d for intr-/iso-pipe; expected > 0\n",
1517 __func__, urb->interval);
1518 return -EINVAL;
1519 }
1520 default:
1521 break;
1522 }
1523
1524 spin_lock_irqsave(&max3421_hcd->lock, flags);
1525
1526 max3421_ep = urb->ep->hcpriv;
1527 if (!max3421_ep) {
1528 /* gets freed in max3421_endpoint_disable: */
Alexey Khoroshilov6c0f3692014-06-19 23:44:57 +04001529 max3421_ep = kzalloc(sizeof(struct max3421_ep), GFP_ATOMIC);
David Mosberger-Tang00c5aa12014-05-28 16:09:16 -06001530 if (!max3421_ep) {
1531 retval = -ENOMEM;
1532 goto out;
1533 }
David Mosberger2d531392014-04-28 22:14:07 -06001534 max3421_ep->ep = urb->ep;
1535 max3421_ep->last_active = max3421_hcd->frame_number;
1536 urb->ep->hcpriv = max3421_ep;
1537
1538 list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list);
1539 }
1540
1541 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1542 if (retval == 0) {
1543 /* Since we added to the queue, restart scheduling: */
1544 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC;
1545 wake_up_process(max3421_hcd->spi_thread);
1546 }
1547
David Mosberger-Tang00c5aa12014-05-28 16:09:16 -06001548out:
David Mosberger2d531392014-04-28 22:14:07 -06001549 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1550 return retval;
1551}
1552
1553static int
1554max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1555{
1556 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1557 unsigned long flags;
1558 int retval;
1559
1560 spin_lock_irqsave(&max3421_hcd->lock, flags);
1561
1562 /*
1563 * This will set urb->unlinked which in turn causes the entry
1564 * to be dropped at the next opportunity.
1565 */
1566 retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1567 if (retval == 0) {
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001568 set_bit(CHECK_UNLINK, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001569 wake_up_process(max3421_hcd->spi_thread);
1570 }
1571 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1572 return retval;
1573}
1574
1575static void
1576max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1577{
1578 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1579 unsigned long flags;
1580
1581 spin_lock_irqsave(&max3421_hcd->lock, flags);
1582
1583 if (ep->hcpriv) {
1584 struct max3421_ep *max3421_ep = ep->hcpriv;
1585
1586 /* remove myself from the ep_list: */
1587 if (!list_empty(&max3421_ep->ep_list))
1588 list_del(&max3421_ep->ep_list);
1589 kfree(max3421_ep);
1590 ep->hcpriv = NULL;
1591 }
1592
1593 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1594}
1595
1596static int
1597max3421_get_frame_number(struct usb_hcd *hcd)
1598{
1599 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1600 return max3421_hcd->frame_number;
1601}
1602
1603/*
1604 * Should return a non-zero value when any port is undergoing a resume
1605 * transition while the root hub is suspended.
1606 */
1607static int
1608max3421_hub_status_data(struct usb_hcd *hcd, char *buf)
1609{
1610 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1611 unsigned long flags;
1612 int retval = 0;
1613
1614 spin_lock_irqsave(&max3421_hcd->lock, flags);
1615 if (!HCD_HW_ACCESSIBLE(hcd))
1616 goto done;
1617
1618 *buf = 0;
1619 if ((max3421_hcd->port_status & PORT_C_MASK) != 0) {
 1620 *buf = (1 << 1); /* port 1 has a change to report */
1621 dev_dbg(hcd->self.controller,
1622 "port status 0x%08x has changes\n",
1623 max3421_hcd->port_status);
1624 retval = 1;
1625 if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED)
1626 usb_hcd_resume_root_hub(hcd);
1627 }
1628done:
1629 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1630 return retval;
1631}
1632
1633static inline void
1634hub_descriptor(struct usb_hub_descriptor *desc)
1635{
1636 memset(desc, 0, sizeof(*desc));
1637 /*
1638 * See Table 11-13: Hub Descriptor in USB 2.0 spec.
1639 */
Sergei Shtylyove3d02e02015-03-29 01:14:03 +03001640 desc->bDescriptorType = USB_DT_HUB; /* hub descriptor */
David Mosberger2d531392014-04-28 22:14:07 -06001641 desc->bDescLength = 9;
Sergei Shtylyov2e48c462015-01-19 01:38:22 +03001642 desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM |
1643 HUB_CHAR_COMMON_OCPM);
David Mosberger2d531392014-04-28 22:14:07 -06001644 desc->bNbrPorts = 1;
1645}
1646
1647/*
1648 * Set the MAX3421E general-purpose output with number PIN_NUMBER to
1649 * VALUE (0 or 1). PIN_NUMBER may be in the range from 1-8. For
1650 * any other value, this function acts as a no-op.
1651 */
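/*
 * For example, pin_number 5 maps to bit 0 of IOPINS2 (idx 1, mask 0x01),
 * while pin_number 1 maps to bit 0 of IOPINS1 (idx 0, mask 0x01).
 */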
1652static void
1653max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
1654{
1655 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1656 u8 mask, idx;
1657
1658 --pin_number;
1659 if (pin_number > 7)
1660 return;
1661
Jaewon Kim59b71f72016-07-21 22:20:53 +09001662 mask = 1u << (pin_number % 4);
David Mosberger2d531392014-04-28 22:14:07 -06001663 idx = pin_number / 4;
1664
1665 if (value)
1666 max3421_hcd->iopins[idx] |= mask;
1667 else
1668 max3421_hcd->iopins[idx] &= ~mask;
David Mosberger-Tang2eb5dbd2014-06-19 12:56:53 -06001669 set_bit(IOPIN_UPDATE, &max3421_hcd->todo);
David Mosberger2d531392014-04-28 22:14:07 -06001670 wake_up_process(max3421_hcd->spi_thread);
1671}
1672
1673static int
1674max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
1675 char *buf, u16 length)
1676{
1677 struct spi_device *spi = to_spi_device(hcd->self.controller);
1678 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
1679 struct max3421_hcd_platform_data *pdata;
1680 unsigned long flags;
1681 int retval = 0;
1682
1683 spin_lock_irqsave(&max3421_hcd->lock, flags);
1684
1685 pdata = spi->dev.platform_data;
1686
1687 switch (type_req) {
1688 case ClearHubFeature:
1689 break;
1690 case ClearPortFeature:
1691 switch (value) {
1692 case USB_PORT_FEAT_SUSPEND:
1693 break;
1694 case USB_PORT_FEAT_POWER:
1695 dev_dbg(hcd->self.controller, "power-off\n");
David Mosberger-Tang4055e5e2014-05-29 10:23:55 -06001696 max3421_gpout_set_value(hcd, pdata->vbus_gpout,
1697 !pdata->vbus_active_level);
David Mosberger2d531392014-04-28 22:14:07 -06001698 /* FALLS THROUGH */
1699 default:
1700 max3421_hcd->port_status &= ~(1 << value);
1701 }
1702 break;
1703 case GetHubDescriptor:
1704 hub_descriptor((struct usb_hub_descriptor *) buf);
1705 break;
1706
1707 case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
1708 case GetPortErrorCount:
1709 case SetHubDepth:
1710 /* USB3 only */
1711 goto error;
1712
1713 case GetHubStatus:
1714 *(__le32 *) buf = cpu_to_le32(0);
1715 break;
1716
1717 case GetPortStatus:
1718 if (index != 1) {
1719 retval = -EPIPE;
1720 goto error;
1721 }
1722 ((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status);
1723 ((__le16 *) buf)[1] =
1724 cpu_to_le16(max3421_hcd->port_status >> 16);
1725 break;
1726
1727 case SetHubFeature:
1728 retval = -EPIPE;
1729 break;
1730
1731 case SetPortFeature:
1732 switch (value) {
1733 case USB_PORT_FEAT_LINK_STATE:
1734 case USB_PORT_FEAT_U1_TIMEOUT:
1735 case USB_PORT_FEAT_U2_TIMEOUT:
1736 case USB_PORT_FEAT_BH_PORT_RESET:
1737 goto error;
1738 case USB_PORT_FEAT_SUSPEND:
1739 if (max3421_hcd->active)
1740 max3421_hcd->port_status |=
1741 USB_PORT_STAT_SUSPEND;
1742 break;
1743 case USB_PORT_FEAT_POWER:
1744 dev_dbg(hcd->self.controller, "power-on\n");
1745 max3421_hcd->port_status |= USB_PORT_STAT_POWER;
David Mosberger-Tang4055e5e2014-05-29 10:23:55 -06001746 max3421_gpout_set_value(hcd, pdata->vbus_gpout,
1747 pdata->vbus_active_level);
David Mosberger2d531392014-04-28 22:14:07 -06001748 break;
1749 case USB_PORT_FEAT_RESET:
1750 max3421_reset_port(hcd);
1751 /* FALLS THROUGH */
1752 default:
1753 if ((max3421_hcd->port_status & USB_PORT_STAT_POWER)
1754 != 0)
1755 max3421_hcd->port_status |= (1 << value);
1756 }
1757 break;
1758
1759 default:
1760 dev_dbg(hcd->self.controller,
1761 "hub control req%04x v%04x i%04x l%d\n",
1762 type_req, value, index, length);
1763error: /* "protocol stall" on error */
1764 retval = -EPIPE;
1765 }
1766
1767 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1768 return retval;
1769}
1770
1771static int
1772max3421_bus_suspend(struct usb_hcd *hcd)
1773{
1774 return -1;
1775}
1776
1777static int
1778max3421_bus_resume(struct usb_hcd *hcd)
1779{
1780 return -1;
1781}
1782
1783/*
1784 * The SPI driver already takes care of DMA-mapping/unmapping, so no
1785 * reason to do it twice.
1786 */
1787static int
1788max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1789{
1790 return 0;
1791}
1792
1793static void
1794max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1795{
1796}
1797
1798static struct hc_driver max3421_hcd_desc = {
1799 .description = "max3421",
1800 .product_desc = DRIVER_DESC,
1801 .hcd_priv_size = sizeof(struct max3421_hcd),
1802 .flags = HCD_USB11,
1803 .reset = max3421_reset,
1804 .start = max3421_start,
1805 .stop = max3421_stop,
1806 .get_frame_number = max3421_get_frame_number,
1807 .urb_enqueue = max3421_urb_enqueue,
1808 .urb_dequeue = max3421_urb_dequeue,
1809 .map_urb_for_dma = max3421_map_urb_for_dma,
1810 .unmap_urb_for_dma = max3421_unmap_urb_for_dma,
1811 .endpoint_disable = max3421_endpoint_disable,
1812 .hub_status_data = max3421_hub_status_data,
1813 .hub_control = max3421_hub_control,
1814 .bus_suspend = max3421_bus_suspend,
1815 .bus_resume = max3421_bus_resume,
1816};
1817
1818static int
1819max3421_probe(struct spi_device *spi)
1820{
1821 struct max3421_hcd *max3421_hcd;
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001822 struct usb_hcd *hcd = NULL;
1823 int retval = -ENOMEM;
David Mosberger2d531392014-04-28 22:14:07 -06001824
1825 if (spi_setup(spi) < 0) {
1826 dev_err(&spi->dev, "Unable to setup SPI bus");
1827 return -EFAULT;
1828 }
1829
1830 hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
1831 dev_name(&spi->dev));
1832 if (!hcd) {
1833 dev_err(&spi->dev, "failed to create HCD structure\n");
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001834 goto error;
David Mosberger2d531392014-04-28 22:14:07 -06001835 }
1836 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1837 max3421_hcd = hcd_to_max3421(hcd);
1838 max3421_hcd->next = max3421_hcd_list;
1839 max3421_hcd_list = max3421_hcd;
1840 INIT_LIST_HEAD(&max3421_hcd->ep_list);
1841
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001842 max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
Wolfram Sang13dcf7802016-08-25 19:39:07 +02001843 if (!max3421_hcd->tx)
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001844 goto error;
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001845 max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL);
Wolfram Sang13dcf7802016-08-25 19:39:07 +02001846 if (!max3421_hcd->rx)
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001847 goto error;
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001848
David Mosberger2d531392014-04-28 22:14:07 -06001849 max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd,
1850 "max3421_spi_thread");
1851 if (max3421_hcd->spi_thread == ERR_PTR(-ENOMEM)) {
1852 dev_err(&spi->dev,
1853 "failed to create SPI thread (out of memory)\n");
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001854 goto error;
David Mosberger2d531392014-04-28 22:14:07 -06001855 }
1856
1857 retval = usb_add_hcd(hcd, 0, 0);
1858 if (retval) {
1859 dev_err(&spi->dev, "failed to add HCD\n");
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001860 goto error;
David Mosberger2d531392014-04-28 22:14:07 -06001861 }
1862
1863 retval = request_irq(spi->irq, max3421_irq_handler,
1864 IRQF_TRIGGER_LOW, "max3421", hcd);
1865 if (retval < 0) {
David Mosberger2d531392014-04-28 22:14:07 -06001866 dev_err(&spi->dev, "failed to request irq %d\n", spi->irq);
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001867 goto error;
David Mosberger2d531392014-04-28 22:14:07 -06001868 }
1869 return 0;
David Mosberger-Tang05dfa5c2014-05-28 22:40:00 -06001870
1871error:
1872 if (hcd) {
1873 kfree(max3421_hcd->tx);
1874 kfree(max3421_hcd->rx);
1875 if (max3421_hcd->spi_thread)
1876 kthread_stop(max3421_hcd->spi_thread);
1877 usb_put_hcd(hcd);
1878 }
1879 return retval;
David Mosberger2d531392014-04-28 22:14:07 -06001880}
1881
1882static int
1883max3421_remove(struct spi_device *spi)
1884{
1885 struct max3421_hcd *max3421_hcd = NULL, **prev;
1886 struct usb_hcd *hcd = NULL;
1887 unsigned long flags;
1888
1889 for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
1890 max3421_hcd = *prev;
1891 hcd = max3421_to_hcd(max3421_hcd);
1892 if (hcd->self.controller == &spi->dev)
1893 break;
1894 }
1895 if (!max3421_hcd) {
1896 dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
1897 spi);
1898 return -ENODEV;
1899 }
1900
1901 usb_remove_hcd(hcd);
1902
1903 spin_lock_irqsave(&max3421_hcd->lock, flags);
1904
1905 kthread_stop(max3421_hcd->spi_thread);
1906 *prev = max3421_hcd->next;
1907
1908 spin_unlock_irqrestore(&max3421_hcd->lock, flags);
1909
1910 free_irq(spi->irq, hcd);
1911
1912 usb_put_hcd(hcd);
1913 return 0;
1914}
1915
1916static struct spi_driver max3421_driver = {
1917 .probe = max3421_probe,
1918 .remove = max3421_remove,
1919 .driver = {
1920 .name = "max3421-hcd",
David Mosberger2d531392014-04-28 22:14:07 -06001921 },
1922};
1923
Sachin Kamat7df45d52014-05-29 17:21:01 +05301924module_spi_driver(max3421_driver);
David Mosberger2d531392014-04-28 22:14:07 -06001925
1926MODULE_DESCRIPTION(DRIVER_DESC);
1927MODULE_AUTHOR("David Mosberger <davidm@egauge.net>");
1928MODULE_LICENSE("GPL");