blob: 4d62a3e74710d79321d17b5618cf36b2a69f7f08 [file] [log] [blame]
Pavankumar Kondeti38e508c2014-02-24 14:53:31 +05301/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 * Copyright (c) 2001-2004 by David Brownell
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16/*
17 * Root HUB management and Asynchronous scheduling traversal
18 * Based on ehci-hub.c and ehci-q.c
19 */
20
21#define pr_fmt(fmt) "%s: " fmt, __func__
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/err.h>
26#include <linux/ktime.h>
27#include <linux/uaccess.h>
28#include <linux/debugfs.h>
29#include <linux/pm_runtime.h>
30#include <linux/regulator/consumer.h>
31#include <linux/gpio.h>
32#include <linux/of_gpio.h>
33#include <linux/spinlock.h>
34#include <linux/firmware.h>
35#include <linux/spi/spi.h>
36#include <linux/usb.h>
37#include <linux/usb/hcd.h>
38#include <linux/usb/ch9.h>
39#include <linux/usb/ch11.h>
40
41#include <asm/unaligned.h>
42#include <mach/gpiomux.h>
43
44#define CREATE_TRACE_POINTS
45#include <trace/events/ice40.h>
46
47#define FADDR_REG 0x00 /* R/W: Device address */
48#define HCMD_REG 0x01 /* R/W: Host transfer command */
49#define XFRST_REG 0x02 /* R: Transfer status */
50#define IRQ_REG 0x03 /* R/C: IRQ status */
51#define IEN_REG 0x04 /* R/W: IRQ enable */
52#define CTRL0_REG 0x05 /* R/W: Host control command */
53#define CTRL1_REG 0x06 /* R/W: Host control command */
54#define WBUF0_REG 0x10 /* W: Tx fifo 0 */
55#define WBUF1_REG 0x11 /* W: Tx fifo 1 */
56#define SUBUF_REG 0x12 /* W: SETUP fifo */
57#define WBLEN_REG 0x13 /* W: Tx fifo size */
58#define RBUF0_REG 0x18 /* R: Rx fifo 0 */
59#define RBUF1_REG 0x19 /* R: Rx fifo 1 */
60#define RBLEN_REG 0x1B /* R: Rx fifo size */
61
62#define WRITE_CMD(addr) ((addr << 3) | 1)
63#define READ_CMD(addr) ((addr << 3) | 0)
64
65/* Host controller command register definitions */
66#define HCMD_EP(ep) (ep & 0xF)
67#define HCMD_BSEL(sel) (sel << 4)
68#define HCMD_TOGV(toggle) (toggle << 5)
69#define HCMD_PT(token) (token << 6)
70
71/* Transfer status register definitions */
72#define XFR_MASK(xfr) (xfr & 0xF)
73#define XFR_SUCCESS 0x0
74#define XFR_BUSY 0x1
75#define XFR_PKTERR 0x2
76#define XFR_PIDERR 0x3
77#define XFR_NAK 0x4
78#define XFR_STALL 0x5
79#define XFR_WRONGPID 0x6
80#define XFR_CRCERR 0x7
81#define XFR_TOGERR 0x8
82#define XFR_BADLEN 0x9
83#define XFR_TIMEOUT 0xA
84
85#define LINE_STATE(xfr) ((xfr & 0x30) >> 4) /* D+, D- */
86#define DPST BIT(5)
87#define DMST BIT(4)
88#define PLLOK BIT(6)
89#define R64B BIT(7)
90
91/* Interrupt enable/status register definitions */
92#define RESET_IRQ BIT(0)
93#define RESUME_IRQ BIT(1)
94#define SUSP_IRQ BIT(3)
95#define DISCONNECT_IRQ BIT(4)
96#define CONNECT_IRQ BIT(5)
97#define FRAME_IRQ BIT(6)
98#define XFR_IRQ BIT(7)
99
100/* Control 0 register definitions */
101#define RESET_CTRL BIT(0)
102#define FRAME_RESET_CTRL BIT(1)
103#define DET_BUS_CTRL BIT(2)
104#define RESUME_CTRL BIT(3)
105#define SOFEN_CTRL BIT(4)
106#define DM_PD_CTRL BIT(6)
107#define DP_PD_CTRL BIT(7)
108#define HRST_CTRL BIT(5)
109
110/* Control 1 register definitions */
111#define INT_EN_CTRL BIT(0)
112
/*
 * Kinds of SPI messages pre-allocated by the driver; one message/buffer
 * set per type lives in struct ice40_hcd (fmsg, wmsg, rmsg, setup_msg,
 * in_msg, out_msg).
 */
enum ice40_xfr_type {
	FIRMWARE_XFR,
	REG_WRITE_XFR,
	REG_READ_XFR,
	SETUP_XFR,
	DATA_IN_XFR,
	DATA_OUT_XFR,
};
121
/*
 * Stages of a control transfer on endpoint 0, tracked in
 * ice40_hcd->ep0_state.  SETUP_PHASE is explicitly 1, so a zeroed
 * ep0_state is never a valid stage.
 */
enum ice40_ep_phase {
	SETUP_PHASE = 1,
	DATA_PHASE,
	STATUS_PHASE,
};
127
/*
 * Per-endpoint state, stored in usb_host_endpoint->hcpriv.  Allocated
 * lazily when the first URB is queued to the endpoint (see
 * ice40_urb_enqueue) and freed in ice40_endpoint_disable.
 */
struct ice40_ep {
	u8 xcat_err;			/* consecutive transaction error count */
	bool unlinking;			/* an URB unlink is pending; the async
					 * worker completes the unlinked URBs */
	bool halted;			/* URB retired with -EPIPE/-EPROTO;
					 * endpoint dropped from the schedule */
	struct usb_host_endpoint *ep;	/* back pointer to the core endpoint */
	struct list_head ep_list;	/* entry in ice40_hcd->async_list */
};
135
/* Driver state for the iCE40 SPI-attached full-speed host controller */
struct ice40_hcd {
	spinlock_t lock;	/* protects async_list and endpoint state */

	struct mutex wlock;	/* serializes use of the shared write message */
	struct mutex rlock;	/* serializes use of the shared read message */

	u8 devnum;		/* device address cached in FADDR_REG */
	u32 port_flags;		/* software root-hub port status/change bits */
	u8 ctrl0;		/* cached CTRL0_REG value */
	u8 wblen0;		/* cached WBLEN_REG value (~0 = invalid) */

	enum ice40_ep_phase ep0_state;	/* current control-transfer stage */
	struct usb_hcd *hcd;

	struct list_head async_list;	/* endpoints with URBs pending */
	struct workqueue_struct *wq;
	struct work_struct async_work;	/* asynchronous schedule traversal */

	/* GPIOs used to power, configure and clock the bridge chip */
	int reset_gpio;
	int slave_select_gpio;
	int config_done_gpio;
	int vcc_en_gpio;
	int clk_en_gpio;

	struct regulator *core_vcc;
	struct regulator *spi_vcc;
	struct regulator *gpio_vcc;
	bool powered;

	struct dentry *dbg_root;	/* debugfs directory */
	bool pcd_pending;	/* port change detected, reported via
				 * ice40_hub_status_data */

	/* Pre-allocated SPI messages and buffers, one set per xfr type */
	struct spi_device *spi;

	struct spi_message *fmsg;	/* firmware xfr, see ice40_spi_load_fw */
	struct spi_transfer *fmsg_xfr; /* size 1 */

	struct spi_message *wmsg;	/* register write */
	struct spi_transfer *wmsg_xfr; /* size 1 */
	u8 *w_tx_buf;
	u8 *w_rx_buf;

	struct spi_message *rmsg;	/* register read */
	struct spi_transfer *rmsg_xfr; /* size 1 */
	u8 *r_tx_buf;
	u8 *r_rx_buf;

	struct spi_message *setup_msg;	/* control SETUP stage */
	struct spi_transfer *setup_xfr; /* size 2 */
	u8 *setup_buf; /* size 1 for SUBUF */

	struct spi_message *in_msg;	/* IN data read */
	struct spi_transfer *in_xfr; /* size 2 */
	u8 *in_buf; /* size 2 for reading from RBUF0 */

	struct spi_message *out_msg;	/* OUT data write */
	struct spi_transfer *out_xfr; /* size 2 */
	u8 *out_buf; /* size 1 for writing WBUF0 */
};
196
197static char fw_name[16] = "ice40.bin";
198module_param_string(fw, fw_name, sizeof(fw_name), S_IRUGO | S_IWUSR);
199MODULE_PARM_DESC(fw, "firmware blob file name");
200
201static bool debugger;
202module_param(debugger, bool, S_IRUGO | S_IWUSR);
203MODULE_PARM_DESC(debugger, "true to use the debug port");
204
205static inline struct ice40_hcd *hcd_to_ihcd(struct usb_hcd *hcd)
206{
207 return *((struct ice40_hcd **) hcd->hcd_priv);
208}
209
210static void ice40_spi_reg_write(struct ice40_hcd *ihcd, u8 val, u8 addr)
211{
212 int ret;
213
214 /*
215 * Register Write Pattern:
216 * TX: 1st byte is CMD (register + write), 2nd byte is value
217 * RX: Ignore
218 *
219 * The Mutex is to protect concurrent register writes as
220 * we have only 1 SPI message struct.
221 */
222
223 mutex_lock(&ihcd->wlock);
224
225 ihcd->w_tx_buf[0] = WRITE_CMD(addr);
226 ihcd->w_tx_buf[1] = val;
227 ret = spi_sync(ihcd->spi, ihcd->wmsg);
228 if (ret < 0) /* should not happen */
229 pr_err("failed. val = %d addr = %d\n", val, addr);
230
231 trace_ice40_reg_write(addr, val, ihcd->w_tx_buf[0],
232 ihcd->w_tx_buf[1], ret);
233
234 mutex_unlock(&ihcd->wlock);
235}
236
237static int ice40_spi_reg_read(struct ice40_hcd *ihcd, u8 addr)
238{
239 int ret;
240
241 /*
242 * Register Read Pattern:
243 * TX: 1st byte is CMD (register + read)
244 * RX: 1st, 2nd byte Ignore, 3rd byte value.
245 *
246 * The Mutex is to protect concurrent register reads as
247 * we have only 1 SPI message struct.
248 */
249
250 mutex_lock(&ihcd->rlock);
251
252 ihcd->r_tx_buf[0] = READ_CMD(addr);
253 ret = spi_sync(ihcd->spi, ihcd->rmsg);
254 if (ret < 0)
255 pr_err("failed. addr = %d\n", addr);
256 else
257 ret = ihcd->r_rx_buf[2];
258
259 trace_ice40_reg_read(addr, ihcd->r_tx_buf[0], ret);
260
261 mutex_unlock(&ihcd->rlock);
262
263 return ret;
264}
265
266static int ice40_poll_xfer(struct ice40_hcd *ihcd, int usecs)
267{
268 ktime_t start = ktime_get();
269 u8 val, retry = 0;
270 u8 ret = ~0; /* time out */
271
272again:
273
274 /*
275 * The SPI transaction may take tens of usec. Use ktime
276 * based checks rather than loop count.
277 */
278 do {
279 val = ice40_spi_reg_read(ihcd, XFRST_REG);
280
281 if (XFR_MASK(val) != XFR_BUSY)
282 return val;
283
284 } while (ktime_us_delta(ktime_get(), start) < usecs);
285
286 /*
287 * The SPI transaction involves a context switch. For any
288 * reason, if we are scheduled out more than usecs after
289 * the 1st read, this extra read will help.
290 */
291 if (!retry) {
292 retry = 1;
293 goto again;
294 }
295
296 return ret;
297}
298
299static int
300ice40_handshake(struct ice40_hcd *ihcd, u8 reg, u8 mask, u8 done, int usecs)
301{
302 ktime_t start = ktime_get();
303 u8 val, retry = 0;
304
305again:
306 do {
307 val = ice40_spi_reg_read(ihcd, reg);
308 val &= mask;
309
310 if (val == done)
311 return 0;
312
313 } while (ktime_us_delta(ktime_get(), start) < usecs);
314
315 if (!retry) {
316 retry = 1;
317 goto again;
318 }
319
320 return -ETIMEDOUT;
321}
322
323
324static const char hcd_name[] = "ice40-hcd";
325
326static int ice40_reset(struct usb_hcd *hcd)
327{
328 struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
329 u8 ctrl, status;
330 int ret = 0;
331
332 /*
333 * Program the defualt address 0. The device address is
334 * re-programmed after SET_ADDRESS in URB handling path.
335 */
336 ihcd->devnum = 0;
337 ice40_spi_reg_write(ihcd, 0, FADDR_REG);
338
339 ihcd->wblen0 = ~0;
340 /*
341 * Read the line state. This driver is loaded after the
342 * UICC card insertion. So the line state should indicate
343 * that a Full-speed device is connected. Return error
344 * if there is no device connected.
345 *
346 * There can be no device connected during debug. A debugfs
347 * file is provided to sample the bus line and update the
348 * port flags accordingly.
349 */
350
351 if (debugger)
352 goto out;
353
354 ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
355 ice40_spi_reg_write(ihcd, ctrl | DET_BUS_CTRL, CTRL0_REG);
356
357 ret = ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
358 if (ret) {
359 pr_err("bus detection failed\n");
360 goto out;
361 }
362
363 status = ice40_spi_reg_read(ihcd, XFRST_REG);
364 pr_debug("line state (D+, D-) is %d\n", LINE_STATE(status));
365
366 if (status & DPST) {
367 pr_debug("Full speed device connected\n");
368 ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
369 } else {
370 pr_err("No device connected\n");
371 ret = -ENODEV;
372 }
373out:
374 return ret;
375}
376
377static int ice40_run(struct usb_hcd *hcd)
378{
379 struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
380
381 /*
382 * HCD_FLAG_POLL_RH flag is not set by us. Core will not poll
383 * for the port status periodically. This uses_new_polling
384 * flag tells core that this hcd will call usb_hcd_poll_rh_status
385 * upon port change.
386 */
387 hcd->uses_new_polling = 1;
388
389 /*
390 * Cache the ctrl0 register to avoid multiple reads. This register
391 * is written during reset and resume.
392 */
393 ihcd->ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
394 ihcd->ctrl0 |= SOFEN_CTRL;
395 ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);
396
397 return 0;
398}
399
400static void ice40_stop(struct usb_hcd *hcd)
401{
402 struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
403
404 cancel_work_sync(&ihcd->async_work);
405}
406
407/*
408 * The _Error looks odd. But very helpful when looking for
409 * any errors in logs.
410 */
411static char __maybe_unused *xfr_status_string(int status)
412{
413 switch (XFR_MASK(status)) {
414 case XFR_SUCCESS:
415 return "Ack";
416 case XFR_BUSY:
417 return "Busy_Error";
418 case XFR_PKTERR:
419 return "Pkt_Error";
420 case XFR_PIDERR:
421 return "PID_Error";
422 case XFR_NAK:
423 return "Nak";
424 case XFR_STALL:
425 return "Stall_Error";
426 case XFR_WRONGPID:
427 return "WrongPID_Error";
428 case XFR_CRCERR:
429 return "CRC_Error";
430 case XFR_TOGERR:
431 return "Togg_Error";
432 case XFR_BADLEN:
433 return "BadLen_Error";
434 case XFR_TIMEOUT:
435 return "Timeout_Error";
436 default:
437 return "Unknown_Error";
438 }
439}
440
/*
 * Execute the SETUP transaction of a control transfer.
 *
 * Returns 0 on success, -EINPROGRESS to retry later (transient bus
 * error), -EPIPE on stall (or an unexpected NAK), -EPROTO after too
 * many consecutive transaction errors, and -EIO on SPI failure or
 * transfer-engine timeout.
 */
static int ice40_xfer_setup(struct ice40_hcd *ihcd, struct urb *urb)
{
	struct usb_host_endpoint *ep = urb->ep;
	struct ice40_ep *iep = ep->hcpriv;
	void *buf = urb->setup_packet;
	int ret, status;
	u8 cmd;

	/*
	 * SETUP transaction handling:
	 * - copy the setup buffer to the SUBUF fifo
	 * - program the HCMD register to initiate the SETUP transaction
	 * - poll for completion by reading the XFRST register
	 * - interpret the result
	 */

	ihcd->setup_buf[0] = WRITE_CMD(SUBUF_REG);
	ihcd->setup_xfr[1].tx_buf = buf;
	ihcd->setup_xfr[1].len = sizeof(struct usb_ctrlrequest);

	ret = spi_sync(ihcd->spi, ihcd->setup_msg);
	if (ret < 0) {
		pr_err("SPI transfer failed\n");
		status = ret = -EIO;
		goto out;
	}

	/* token type 2 (SETUP), toggle DATA0, buffer 0, endpoint 0 */
	cmd = HCMD_PT(2) | HCMD_TOGV(0) | HCMD_BSEL(0) | HCMD_EP(0);
	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);

	status = ice40_poll_xfer(ihcd, 1000);
	switch (XFR_MASK(status)) {
	case XFR_SUCCESS:
		iep->xcat_err = 0;
		ret = 0;
		break;
	case XFR_NAK: /* Device should not return Nak for SETUP */
	case XFR_STALL:
		iep->xcat_err = 0;
		ret = -EPIPE;
		break;
	case XFR_PKTERR:
	case XFR_PIDERR:
	case XFR_WRONGPID:
	case XFR_CRCERR:
	case XFR_TIMEOUT:
		/* transient bus errors: tolerate up to 8 in a row */
		if (++iep->xcat_err < 8)
			ret = -EINPROGRESS;
		else
			ret = -EPROTO;
		break;
	default:
		pr_err("transaction timed out\n");
		ret = -EIO;
	}

out:
	trace_ice40_setup(xfr_status_string(status), ret);
	return ret;
}
501
/*
 * Execute one IN transaction (at most one max-packet worth of data)
 * for @urb, including the IN status stage of a control transfer.
 *
 * Returns 0 when the URB is complete, -EINPROGRESS when more
 * transactions are needed (or the device NAKed / a transient error
 * occurred), -EPIPE on stall, -EPROTO after repeated errors,
 * -EOVERFLOW on babble/bad length and -EIO on SPI failure or timeout.
 */
static int ice40_xfer_in(struct ice40_hcd *ihcd, struct urb *urb)
{
	struct usb_host_endpoint *ep = urb->ep;
	struct usb_device *udev = urb->dev;
	u32 total_len = urb->transfer_buffer_length;
	u16 maxpacket = usb_endpoint_maxp(&ep->desc);
	u8 epnum = usb_pipeendpoint(urb->pipe);
	bool is_out = usb_pipeout(urb->pipe);
	struct ice40_ep *iep = ep->hcpriv;
	u8 cmd, status, len = 0, t, expected_len;
	void *buf;
	int ret;
	bool short_packet = true;

	if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
		/* status stage is a zero-length IN */
		expected_len = 0;
		buf = NULL;
		t = 1; /* STATUS PHASE is always DATA1 */
	} else {
		expected_len = min_t(u32, maxpacket,
				total_len - urb->actual_length);
		buf = urb->transfer_buffer + urb->actual_length;
		t = usb_gettoggle(udev, epnum, is_out);
	}

	/*
	 * IN transaction handling:
	 * - program the HCMD register to initiate the IN transaction
	 * - poll for completion by reading the XFRST register
	 * - interpret the result
	 * - if ACK is received and we expect some data, read RBLEN
	 * - read the data from RBUF
	 */

	cmd = HCMD_PT(0) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);

	status = ice40_poll_xfer(ihcd, 1000);
	switch (XFR_MASK(status)) {
	case XFR_SUCCESS:
		usb_dotoggle(udev, epnum, is_out);
		iep->xcat_err = 0;
		ret = 0;
		/*
		 * R64B set with a full 64-byte expectation means a
		 * full-size packet; RBLEN need not be read.
		 */
		if ((expected_len == 64) && (status & R64B))
			short_packet = false;
		break;
	case XFR_NAK:
		iep->xcat_err = 0;
		ret = -EINPROGRESS;
		break;
	case XFR_TOGERR:
		/*
		 * Peripheral had missed the previous Ack and sent
		 * the same packet again. Ack is sent by the hardware.
		 * As the data is received already, ignore this
		 * event.
		 */
		ret = -EINPROGRESS;
		break;
	case XFR_PKTERR:
	case XFR_PIDERR:
	case XFR_WRONGPID:
	case XFR_CRCERR:
	case XFR_TIMEOUT:
		/* transient bus errors: tolerate up to 8 in a row */
		if (++iep->xcat_err < 8)
			ret = -EINPROGRESS;
		else
			ret = -EPROTO;
		break;
	case XFR_STALL:
		ret = -EPIPE;
		break;
	case XFR_BADLEN:
		ret = -EOVERFLOW;
		break;
	default:
		pr_err("transaction timed out\n");
		ret = -EIO;
	}

	/*
	 * Proceed further only if Ack is received and
	 * we are expecting some data.
	 */
	if (ret || !expected_len)
		goto out;

	if (short_packet)
		len = ice40_spi_reg_read(ihcd, RBLEN_REG);
	else
		len = 64;

	/* babble condition */
	if (len > expected_len) {
		pr_err("overflow condition\n");
		ret = -EOVERFLOW;
		goto out;
	}

	/*
	 * zero len packet received. nothing to read from
	 * FIFO.
	 */
	if (len == 0) {
		ret = 0;
		goto out;
	}

	ihcd->in_buf[0] = READ_CMD(RBUF0_REG);

	ihcd->in_xfr[1].rx_buf = buf;
	ihcd->in_xfr[1].len = len;

	ret = spi_sync(ihcd->spi, ihcd->in_msg);
	if (ret < 0) {
		pr_err("SPI transfer failed\n");
		ret = -EIO;
		goto out;
	}

	/* a short packet or reaching total_len completes the URB */
	urb->actual_length += len;
	if ((urb->actual_length == total_len) ||
			(len < expected_len))
		ret = 0; /* URB completed */
	else
		ret = -EINPROGRESS; /* still pending */
out:
	trace_ice40_in(epnum, xfr_status_string(status), len,
			expected_len, ret);
	return ret;
}
633
/*
 * Execute one OUT transaction (at most one max-packet worth of data)
 * for @urb, including the OUT status stage of a control transfer.
 *
 * Returns 0 when the URB is complete, -EINPROGRESS when more
 * transactions are needed (or a NAK / transient error occurred),
 * -EPIPE on stall, -EPROTO after repeated errors, -EOVERFLOW on bad
 * length and -EIO on SPI failure or timeout.
 */
static int ice40_xfer_out(struct ice40_hcd *ihcd, struct urb *urb)
{
	struct usb_host_endpoint *ep = urb->ep;
	struct usb_device *udev = urb->dev;
	u32 total_len = urb->transfer_buffer_length;
	u16 maxpacket = usb_endpoint_maxp(&ep->desc);
	u8 epnum = usb_pipeendpoint(urb->pipe);
	bool is_out = usb_pipeout(urb->pipe);
	struct ice40_ep *iep = ep->hcpriv;
	u8 cmd, status, len, t;
	void *buf;
	int ret;

	if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
		/* status stage is a zero-length OUT */
		len = 0;
		buf = NULL;
		t = 1; /* STATUS PHASE is always DATA1 */
	} else {
		len = min_t(u32, maxpacket, total_len - urb->actual_length);
		buf = urb->transfer_buffer + urb->actual_length;
		t = usb_gettoggle(udev, epnum, is_out);
	}

	/*
	 * OUT transaction handling:
	 * - if we need to send data, write the data to the WBUF fifo
	 * - program the WBLEN register
	 * - program the HCMD register to initiate the OUT transaction
	 * - poll for completion by reading the XFRST register
	 * - interpret the result
	 */


	if (!len)
		goto no_data;

	ihcd->out_buf[0] = WRITE_CMD(WBUF0_REG);

	ihcd->out_xfr[1].tx_buf = buf;
	ihcd->out_xfr[1].len = len;

	ret = spi_sync(ihcd->spi, ihcd->out_msg);
	if (ret < 0) {
		pr_err("SPI transaction failed\n");
		status = ret = -EIO;
		goto out;
	}

no_data:
	/*
	 * Cache the WBLEN register and update it only if it
	 * is changed from the previous value.
	 */
	if (len != ihcd->wblen0) {
		ice40_spi_reg_write(ihcd, len, WBLEN_REG);
		ihcd->wblen0 = len;
	}

	/* token type 1 (OUT), current toggle, buffer 0 */
	cmd = HCMD_PT(1) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);

	status = ice40_poll_xfer(ihcd, 1000);
	switch (XFR_MASK(status)) {
	case XFR_SUCCESS:
		usb_dotoggle(udev, epnum, is_out);
		urb->actual_length += len;
		iep->xcat_err = 0;
		/* a zero-length packet or reaching total_len finishes it */
		if (!len || (urb->actual_length == total_len))
			ret = 0; /* URB completed */
		else
			ret = -EINPROGRESS; /* pending */
		break;
	case XFR_NAK:
		iep->xcat_err = 0;
		ret = -EINPROGRESS;
		break;
	case XFR_PKTERR:
	case XFR_PIDERR:
	case XFR_WRONGPID:
	case XFR_CRCERR:
	case XFR_TIMEOUT:
		/* transient bus errors: tolerate up to 8 in a row */
		if (++iep->xcat_err < 8)
			ret = -EINPROGRESS;
		else
			ret = -EPROTO;
		break;
	case XFR_STALL:
		ret = -EPIPE;
		break;
	case XFR_BADLEN:
		ret = -EOVERFLOW;
		break;
	default:
		pr_err("transaction timed out\n");
		ret = -EIO;
	}

out:
	trace_ice40_out(epnum, xfr_status_string(status), len, ret);
	return ret;
}
735
736static int ice40_process_urb(struct ice40_hcd *ihcd, struct urb *urb)
737{
738 struct usb_device *udev = urb->dev;
739 u8 devnum = usb_pipedevice(urb->pipe);
740 bool is_out = usb_pipeout(urb->pipe);
741 u32 total_len = urb->transfer_buffer_length;
742 int ret = 0;
743
744 /*
745 * The USB device address can be reset to 0 by core temporarily
746 * during reset recovery process. Don't assume anything about
747 * device address. The device address is programmed as 0 by
748 * default. If the device address is different to the previous
749 * cached value, re-program it here before proceeding. The device
750 * address register (FADDR) holds the value across multiple
751 * transactions and we support only one device.
752 */
753 if (ihcd->devnum != devnum) {
754 ice40_spi_reg_write(ihcd, devnum, FADDR_REG);
755 ihcd->devnum = devnum;
756 }
757
758 switch (usb_pipetype(urb->pipe)) {
759 case PIPE_CONTROL:
760 switch (ihcd->ep0_state) {
761 case SETUP_PHASE:
762 trace_ice40_ep0("SETUP");
763 ret = ice40_xfer_setup(ihcd, urb);
764 if (ret)
765 break;
766 if (total_len) {
767 ihcd->ep0_state = DATA_PHASE;
768 /*
769 * Data stage always begin with
770 * DATA1 PID.
771 */
772 usb_settoggle(udev, 0, is_out, 1);
773 } else {
774 ihcd->ep0_state = STATUS_PHASE;
775 goto do_status;
776 }
777 /* fall through */
778 case DATA_PHASE:
779 trace_ice40_ep0("DATA");
780 if (is_out)
781 ret = ice40_xfer_out(ihcd, urb);
782 else
783 ret = ice40_xfer_in(ihcd, urb);
784 if (ret)
785 break;
786 /* DATA Phase is completed successfully */
787 ihcd->ep0_state = STATUS_PHASE;
788 /* fall through */
789 case STATUS_PHASE:
790do_status:
791 trace_ice40_ep0("STATUS");
792 /* zero len DATA transfers have IN status */
793 if (!total_len || is_out)
794 ret = ice40_xfer_in(ihcd, urb);
795 else
796 ret = ice40_xfer_out(ihcd, urb);
797 if (ret)
798 break;
799 ihcd->ep0_state = SETUP_PHASE;
800 break;
801 default:
802 pr_err("unknown stage for a control transfer\n");
803 break;
804 }
805 break;
806 case PIPE_BULK:
807 if (is_out)
808 ret = ice40_xfer_out(ihcd, urb);
809 else
810 ret = ice40_xfer_in(ihcd, urb);
811 /*
812 * We may have to support zero len packet terminations
813 * for URB_ZERO_PACKET URBs.
814 */
815 break;
816 default:
817 pr_err("IN/ISO transfers not supported\n");
818 break;
819 }
820
821 return ret;
822}
823
824/* Must be called with spin lock and interrupts disabled */
825static void ice40_complete_urb(struct usb_hcd *hcd, struct urb *urb, int status)
826{
827 struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
828 struct usb_host_endpoint *ep = urb->ep;
829 struct ice40_ep *iep = ep->hcpriv;
830 struct urb *first_urb;
831 bool needs_update = false;
832 bool control = usb_pipecontrol(urb->pipe);
833
834 /*
835 * If the active URB i.e the first URB in the ep list is being
836 * removed, clear the transaction error count. If it is a control
837 * URB ep0_state needs to be reset to SETUP_PHASE.
838 */
839 first_urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
840 if (urb == first_urb)
841 needs_update = true;
842
843 usb_hcd_unlink_urb_from_ep(hcd, urb);
844 spin_unlock(&ihcd->lock);
845 trace_ice40_urb_done(urb, status);
846 usb_hcd_giveback_urb(ihcd->hcd, urb, status);
847 spin_lock(&ihcd->lock);
848
849 if (needs_update) {
850 iep->xcat_err = 0;
851 if (control)
852 ihcd->ep0_state = SETUP_PHASE;
853 }
854}
855
/*
 * Asynchronous schedule worker: round-robin over the endpoints on
 * async_list, advancing the head URB of each endpoint by one
 * transaction per visit, completing retired URBs and handling
 * unlinks.  Runs on ihcd->wq; re-queued from ice40_urb_enqueue.
 */
static void ice40_async_work(struct work_struct *work)
{
	struct ice40_hcd *ihcd = container_of(work,
			struct ice40_hcd, async_work);
	struct usb_hcd *hcd = ihcd->hcd;
	struct list_head *tmp, *uent, *utmp;
	struct ice40_ep *iep;
	struct usb_host_endpoint *ep;
	struct urb *urb;
	unsigned long flags;
	int status;

	/*
	 * Traverse the active endpoints circularly and process URBs.
	 * If any endpoint is marked for unlinking, the URBs are
	 * completed here. The endpoint is removed from active list
	 * if a URB is retired with -EPIPE/-EPROTO errors.
	 */

	spin_lock_irqsave(&ihcd->lock, flags);

	if (list_empty(&ihcd->async_list))
		goto out;

	iep = list_first_entry(&ihcd->async_list, struct ice40_ep, ep_list);
	while (1) {
		ep = iep->ep;

		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
		if (urb->unlinked) {
			status = urb->unlinked;
		} else {
			/* drop the lock for the (slow) SPI transactions */
			spin_unlock_irqrestore(&ihcd->lock, flags);
			status = ice40_process_urb(ihcd, urb);
			spin_lock_irqsave(&ihcd->lock, flags);
		}

		if ((status == -EPIPE) || (status == -EPROTO))
			iep->halted = true;

		if (status != -EINPROGRESS)
			ice40_complete_urb(hcd, urb, status);

		/* flush any URBs that were unlinked while we worked */
		if (iep->unlinking) {
			list_for_each_safe(uent, utmp, &ep->urb_list) {
				urb = list_entry(uent, struct urb, urb_list);
				if (urb->unlinked)
					ice40_complete_urb(hcd, urb, 0);
			}
			iep->unlinking = false;
		}

		/*
		 * Pick the successor before possibly deleting this
		 * endpoint from the schedule.
		 */
		tmp = iep->ep_list.next;
		if (list_empty(&ep->urb_list) || iep->halted) {
			list_del_init(&iep->ep_list);

			if (list_empty(&ihcd->async_list))
				break;
		}

		/* skip the list head to keep the traversal circular */
		if (tmp == &ihcd->async_list)
			tmp = tmp->next;
		iep = list_entry(tmp, struct ice40_ep, ep_list);
	}
out:
	spin_unlock_irqrestore(&ihcd->lock, flags);
}
923
/*
 * HCD enqueue callback: link @urb to its endpoint, lazily allocate
 * the per-endpoint state, put the endpoint on the asynchronous
 * schedule and kick the worker.
 *
 * Returns 0 on success, -ENOTSUPP for ISO/INT pipes, -ENOMEM if the
 * endpoint state cannot be allocated, or the core's link error.
 */
static int
ice40_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
	struct usb_device *udev = urb->dev;
	struct usb_host_endpoint *ep = urb->ep;
	bool is_out = usb_pipeout(urb->pipe);
	u8 epnum = usb_pipeendpoint(urb->pipe);
	struct ice40_ep *iep;
	unsigned long flags;
	int ret;

	/*
	 * This bridge chip supports only Full-speed. So ISO is not
	 * supported. Interrupt support is not implemented as there
	 * is no use case.
	 */
	if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
		pr_debug("iso and int xfers not supported\n");
		ret = -ENOTSUPP;
		goto out;
	}

	spin_lock_irqsave(&ihcd->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto rel_lock;

	trace_ice40_urb_enqueue(urb);

	/* first I/O on this endpoint: allocate and init its state */
	iep = ep->hcpriv;
	if (!iep) {
		iep = kzalloc(sizeof(struct ice40_ep), GFP_ATOMIC);
		if (!iep) {
			pr_debug("fail to allocate iep\n");
			ret = -ENOMEM;
			goto unlink;
		}
		ep->hcpriv = iep;
		INIT_LIST_HEAD(&iep->ep_list);
		iep->ep = ep;
		usb_settoggle(udev, epnum, is_out, 0);
		if (usb_pipecontrol(urb->pipe))
			ihcd->ep0_state = SETUP_PHASE;
	}

	/*
	 * We expect the interface driver to clear the stall condition
	 * before queueing another URB. For example mass storage
	 * device may STALL a bulk endpoint for un-supported command.
	 * The storage driver clear the STALL condition before queueing
	 * another URB.
	 */
	iep->halted = false;
	if (list_empty(&iep->ep_list))
		list_add_tail(&iep->ep_list, &ihcd->async_list);

	queue_work(ihcd->wq, &ihcd->async_work);

	spin_unlock_irqrestore(&ihcd->lock, flags);

	return 0;
unlink:
	usb_hcd_unlink_urb_from_ep(hcd, urb);
rel_lock:
	spin_unlock_irqrestore(&ihcd->lock, flags);
out:
	return ret;
}
994
/*
 * HCD dequeue (unlink) callback.
 *
 * Returns the result of usb_hcd_check_unlink_urb(), or 0 after the
 * URB has been completed or marked for completion by the worker.
 */
static int
ice40_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct ice40_ep *iep;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ihcd->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto rel_lock;

	trace_ice40_urb_dequeue(urb);
	iep = ep->hcpriv;

	/*
	 * If the endpoint is not in asynchronous schedule, complete
	 * the URB immediately. Otherwise mark it as being unlinked.
	 * The asynchronous schedule work will take care of completing
	 * the URB when this endpoint is encountered during traversal.
	 */
	if (list_empty(&iep->ep_list))
		ice40_complete_urb(hcd, urb, status);
	else
		iep->unlinking = true;

rel_lock:
	spin_unlock_irqrestore(&ihcd->lock, flags);
	return ret;
}
1028
1029static void
1030ice40_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1031{
1032 struct ice40_ep *iep = ep->hcpriv;
1033
1034 /*
1035 * If there is no I/O on this endpoint before, ep->hcpriv
1036 * will be NULL. nothing to do in this case.
1037 */
1038 if (!iep)
1039 return;
1040
1041 if (!list_empty(&ep->urb_list))
1042 pr_err("trying to disable an non-empty endpoint\n");
1043
1044 kfree(iep);
1045 ep->hcpriv = NULL;
1046}
1047
1048
1049static int ice40_hub_status_data(struct usb_hcd *hcd, char *buf)
1050{
1051 struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
1052 int ret = 0;
1053
1054 /*
1055 * core calls hub_status_method during suspend/resume.
1056 * return 0 if there is no port change. pcd_pending
1057 * is set to true when a device is connected and line
1058 * state is sampled via debugfs command. clear this
1059 * flag after returning the port change status.
1060 */
1061 if (ihcd->pcd_pending) {
1062 *buf = (1 << 1);
1063 ret = 1;
1064 ihcd->pcd_pending = false;
1065 }
1066
1067 return ret;
1068}
1069
1070static void ice40_hub_descriptor(struct usb_hub_descriptor *desc)
1071{
1072 /* There is nothing special about us!! */
1073 desc->bDescLength = 9;
1074 desc->bDescriptorType = 0x29;
1075 desc->bNbrPorts = 1;
1076 desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM |
1077 HUB_CHAR_NO_OCPM);
1078 desc->bPwrOn2PwrGood = 0;
1079 desc->bHubContrCurrent = 0;
1080 desc->u.hs.DeviceRemovable[0] = 0;
1081 desc->u.hs.DeviceRemovable[1] = ~0;
1082}
1083
/*
 * Root-hub control request handler for our single software-emulated
 * port.  Returns 0 on success or -EPIPE ("protocol stall") for
 * unsupported/invalid requests.
 */
static int
ice40_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
		u16 wIndex, char *buf, u16 wLength)
{
	int ret = 0;
	u8 ctrl;
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);

	/*
	 * We have only 1 port. No special locking is required while
	 * handling root hub commands. The bridge chip does not maintain
	 * any port states. Maintain different port states in software.
	 */
	switch (typeReq) {
	case ClearPortFeature:
		if (wIndex != 1 || wLength != 0)
			goto error;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			/*
			 * The device is resumed as part of the root hub
			 * resume to simplify the resume sequence. so
			 * we may simply return from here. If device is
			 * resumed before root hub is suspended, this
			 * flags will be cleared here.
			 */
			if (!(ihcd->port_flags & USB_PORT_STAT_SUSPEND))
				break;
			ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
			break;
		case USB_PORT_FEAT_ENABLE:
			ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
			break;
		case USB_PORT_FEAT_POWER:
			ihcd->port_flags &= ~USB_PORT_STAT_POWER;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			ihcd->port_flags &= ~(USB_PORT_STAT_C_CONNECTION << 16);
			break;
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_SUSPEND:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_RESET:
			/* nothing special here */
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		ice40_hub_descriptor((struct usb_hub_descriptor *) buf);
		break;
	case GetHubStatus:
		/* no hub-level status to report */
		put_unaligned_le32(0, buf);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			goto error;

		/*
		 * Core resets the device and requests port status to
		 * stop the reset signaling. If there is a reset in
		 * progress, finish it here.
		 */
		ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
		if (!(ctrl & RESET_CTRL))
			ihcd->port_flags &= ~USB_PORT_STAT_RESET;

		put_unaligned_le32(ihcd->port_flags, buf);
		break;
	case SetPortFeature:
		if (wIndex != 1 || wLength != 0)
			goto error;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			/* can't suspend a port being reset or disabled */
			if (ihcd->port_flags & USB_PORT_STAT_RESET)
				goto error;
			if (!(ihcd->port_flags & USB_PORT_STAT_ENABLE))
				goto error;
			/* SOFs will be stopped during root hub suspend */
			ihcd->port_flags |= USB_PORT_STAT_SUSPEND;
			break;
		case USB_PORT_FEAT_POWER:
			ihcd->port_flags |= USB_PORT_STAT_POWER;
			break;
		case USB_PORT_FEAT_RESET:
			/* Good time to enable the port */
			ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
					RESET_CTRL, CTRL0_REG);
			ihcd->port_flags |= USB_PORT_STAT_RESET;
			ihcd->port_flags |= USB_PORT_STAT_ENABLE;
			break;
		default:
			goto error;
		}
		break;
	default:
error:
		/* "protocol stall" on error */
		ret = -EPIPE;
	}

	trace_ice40_hub_control(typeReq, wValue, wIndex, wLength, ret);
	return ret;
}
1189
1190static void ice40_spi_power_off(struct ice40_hcd *ihcd);
/*
 * Root-hub suspend: stop SOF generation so the device enters suspend,
 * then power-collapse the bridge chip to avoid leakage current.
 *
 * Returns 0 on success or -EAGAIN if no device is connected yet
 * (debug-only situation).
 */
static int ice40_bus_suspend(struct usb_hcd *hcd)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);

	trace_ice40_bus_suspend(0); /* start */

	/* This happens only during debugging */
	if (!ihcd->devnum) {
		pr_debug("device still not connected. abort suspend\n");
		trace_ice40_bus_suspend(2); /* failure */
		return -EAGAIN;
	}
	/*
	 * Stop sending the SOFs on downstream port. The device
	 * finds the bus idle and enter suspend. The device
	 * takes ~3 msec to enter suspend.
	 */
	ihcd->ctrl0 &= ~SOFEN_CTRL;
	ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);
	usleep_range(4500, 5000);

	/*
	 * Power collapse the bridge chip to avoid the leakage
	 * current.
	 */
	ice40_spi_power_off(ihcd);

	trace_ice40_bus_suspend(1); /* successful */
	pm_relax(&ihcd->spi->dev);
	return 0;
}
1222
1223static int ice40_spi_load_fw(struct ice40_hcd *ihcd);
1224static int ice40_bus_resume(struct usb_hcd *hcd)
1225{
1226 struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
1227 u8 ctrl0;
1228 int ret;
1229
1230 pm_stay_awake(&ihcd->spi->dev);
1231 trace_ice40_bus_resume(0); /* start */
1232 /*
1233 * Power up the bridge chip and load the configuration file.
1234 * Re-program the previous settings. For now we need to
1235 * update the device address only.
1236 */
1237 ice40_spi_load_fw(ihcd);
1238 ice40_spi_reg_write(ihcd, ihcd->devnum, FADDR_REG);
1239 ihcd->wblen0 = ~0;
1240
1241 /*
1242 * Program the bridge chip to drive resume signaling. The SOFs
1243 * are automatically transmitted after resume completion. It
1244 * will take ~20 msec for resume completion.
1245 */
1246 ice40_spi_reg_write(ihcd, ihcd->ctrl0 | RESUME_CTRL, CTRL0_REG);
1247 usleep_range(20000, 21000);
1248 ret = ice40_handshake(ihcd, CTRL0_REG, RESUME_CTRL, 0, 5000);
1249 if (ret) {
1250 pr_err("resume failed\n");
1251 trace_ice40_bus_resume(2); /* failure */
1252 return -ENODEV;
1253 }
1254
1255 ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
1256 if (!(ctrl0 & SOFEN_CTRL)) {
1257 pr_err("SOFs are not transmitted after resume\n");
1258 trace_ice40_bus_resume(3); /* failure */
1259 return -ENODEV;
1260 }
1261
1262 ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
1263 ihcd->ctrl0 |= SOFEN_CTRL;
1264
1265 trace_ice40_bus_resume(1); /* success */
1266 return 0;
1267}
1268
1269static void ice40_set_autosuspend_delay(struct usb_device *dev)
1270{
1271 /*
1272 * Immediate suspend for root hub and 500 msec auto-suspend
1273 * timeout for the card.
1274 */
1275 if (!dev->parent)
1276 pm_runtime_set_autosuspend_delay(&dev->dev, 0);
1277 else
1278 pm_runtime_set_autosuspend_delay(&dev->dev, 500);
1279}
1280
/* HC driver callbacks handed to the USB core; full-speed (USB 1.1) host */
static const struct hc_driver ice40_hc_driver = {
	.description = hcd_name,
	.product_desc = "ICE40 SPI Host Controller",
	.hcd_priv_size = sizeof(struct ice40_hcd *),
	.flags = HCD_USB11,

	/* setup and clean up */
	.reset = ice40_reset,
	.start = ice40_run,
	.stop = ice40_stop,

	/* endpoint and I/O routines */
	.urb_enqueue = ice40_urb_enqueue,
	.urb_dequeue = ice40_urb_dequeue,
	.endpoint_disable = ice40_endpoint_disable,

	/* Root hub operations */
	.hub_status_data = ice40_hub_status_data,
	.hub_control = ice40_hub_control,
	.bus_suspend = ice40_bus_suspend,
	.bus_resume = ice40_bus_resume,

	.set_autosuspend_delay = ice40_set_autosuspend_delay,
};
1305
1306static int ice40_spi_parse_dt(struct ice40_hcd *ihcd)
1307{
1308 struct device_node *node = ihcd->spi->dev.of_node;
1309 int ret = 0;
1310
1311 if (!node) {
1312 pr_err("device specific info missing\n");
1313 ret = -ENODEV;
1314 goto out;
1315 }
1316
1317 ihcd->reset_gpio = of_get_named_gpio(node, "lattice,reset-gpio", 0);
1318 if (ihcd->reset_gpio < 0) {
1319 pr_err("reset gpio is missing\n");
1320 ret = ihcd->reset_gpio;
1321 goto out;
1322 }
1323
1324 ihcd->slave_select_gpio = of_get_named_gpio(node,
1325 "lattice,slave-select-gpio", 0);
1326 if (ihcd->slave_select_gpio < 0) {
1327 pr_err("slave select gpio is missing\n");
1328 ret = ihcd->slave_select_gpio;
1329 goto out;
1330 }
1331
1332 ihcd->config_done_gpio = of_get_named_gpio(node,
1333 "lattice,config-done-gpio", 0);
1334 if (ihcd->config_done_gpio < 0) {
1335 pr_err("config done gpio is missing\n");
1336 ret = ihcd->config_done_gpio;
1337 goto out;
1338 }
1339
1340 ihcd->vcc_en_gpio = of_get_named_gpio(node, "lattice,vcc-en-gpio", 0);
1341 if (ihcd->vcc_en_gpio < 0) {
1342 pr_err("vcc enable gpio is missing\n");
1343 ret = ihcd->vcc_en_gpio;
1344 goto out;
1345 }
1346
1347 /*
1348 * When clk-en-gpio is present, it is used to enable the 19.2 MHz
1349 * clock from MSM to the bridge chip. Otherwise on-board clock
1350 * is used.
1351 */
1352 ihcd->clk_en_gpio = of_get_named_gpio(node, "lattice,clk-en-gpio", 0);
1353 if (ihcd->clk_en_gpio < 0)
1354 ihcd->clk_en_gpio = 0;
1355out:
1356 return ret;
1357}
1358
/*
 * Power collapse the bridge chip: de-assert VCC enable, drop the
 * regulators in reverse power-up order and gate the external clock.
 * No-op when the chip is already off.
 */
static void ice40_spi_power_off(struct ice40_hcd *ihcd)
{
	if (!ihcd->powered)
		return;

	gpio_direction_output(ihcd->vcc_en_gpio, 0);
	regulator_disable(ihcd->core_vcc);
	regulator_disable(ihcd->spi_vcc);
	/* gpio_vcc and clk_en_gpio are optional resources */
	if (ihcd->gpio_vcc)
		regulator_disable(ihcd->gpio_vcc);
	if (ihcd->clk_en_gpio)
		gpio_direction_output(ihcd->clk_en_gpio, 0);

	ihcd->powered = false;
}
1374
1375static int ice40_spi_power_up(struct ice40_hcd *ihcd)
1376{
1377 int ret;
1378
1379 if (ihcd->clk_en_gpio) {
1380 ret = gpio_direction_output(ihcd->clk_en_gpio, 1);
1381 if (ret < 0) {
1382 pr_err("fail to enabel clk %d\n", ret);
1383 goto out;
1384 }
1385 }
1386
1387 if (ihcd->gpio_vcc) {
1388 ret = regulator_enable(ihcd->gpio_vcc); /* 1.8 V */
1389 if (ret < 0) {
1390 pr_err("fail to enable gpio vcc\n");
1391 goto disable_clk;
1392 }
1393 }
1394
1395 ret = regulator_enable(ihcd->spi_vcc); /* 1.8 V */
1396 if (ret < 0) {
1397 pr_err("fail to enable spi vcc\n");
1398 goto disable_gpio_vcc;
1399 }
1400
1401 ret = regulator_enable(ihcd->core_vcc); /* 1.2 V */
1402 if (ret < 0) {
1403 pr_err("fail to enable core vcc\n");
1404 goto disable_spi_vcc;
1405 }
1406
1407 ret = gpio_direction_output(ihcd->vcc_en_gpio, 1);
1408 if (ret < 0) {
1409 pr_err("fail to assert vcc gpio\n");
1410 goto disable_core_vcc;
1411 }
1412
1413 ihcd->powered = true;
1414
1415 return 0;
1416
1417disable_core_vcc:
1418 regulator_disable(ihcd->core_vcc);
1419disable_spi_vcc:
1420 regulator_disable(ihcd->spi_vcc);
1421disable_gpio_vcc:
1422 if (ihcd->gpio_vcc)
1423 regulator_disable(ihcd->gpio_vcc);
1424disable_clk:
1425 if (ihcd->clk_en_gpio)
1426 gpio_direction_output(ihcd->clk_en_gpio, 0);
1427out:
1428 return ret;
1429}
1430
/*
 * gpiomux override applied while powering up the bridge: drives the
 * SPI chip select low so the chip samples it at power-on and enters
 * SPI slave (configuration) mode.
 */
static struct gpiomux_setting slave_select_setting = {
	.func = GPIOMUX_FUNC_GPIO,
	.drv = GPIOMUX_DRV_2MA,
	.pull = GPIOMUX_PULL_NONE,
	.dir = GPIOMUX_OUT_LOW,
};
1437
/*
 * Fetch the FPGA configuration image via the firmware loader and
 * stage it in a DMA-capable buffer wired to the firmware SPI
 * transfer, ready for ice40_spi_load_fw().  Returns 0 on success
 * or a negative error code.
 */
static int ice40_spi_cache_fw(struct ice40_hcd *ihcd)
{
	const struct firmware *fw;
	void *buf;
	size_t buf_len;
	int ret;

	ret = request_firmware(&fw, fw_name, &ihcd->spi->dev);
	if (ret < 0) {
		pr_err("fail to get the firmware\n");
		goto out;
	}

	pr_debug("received firmware size = %zu\n", fw->size);

	/*
	 * The bridge expects additional clock cycles after
	 * receiving the configuration data. We don't have a
	 * direct control over SPI clock. Add extra bytes
	 * to the configuration data.
	 */
	buf_len = fw->size + 16;
	buf = devm_kzalloc(&ihcd->spi->dev, buf_len, GFP_KERNEL);
	if (!buf) {
		pr_err("fail to allocate firmware buffer\n");
		ret = -ENOMEM;
		goto release;
	}

	/*
	 * The firmware buffer can not be used for DMA as it
	 * is not physically contiguous. We copy the data
	 * in kmalloc buffer. This buffer will be freed only
	 * during unbind or rmmod (devm lifetime).
	 */
	memcpy(buf, fw->data, fw->size);
	release_firmware(fw);

	/*
	 * The bridge supports only 25 MHz during configuration
	 * file loading.
	 */
	ihcd->fmsg_xfr[0].tx_buf = buf;
	ihcd->fmsg_xfr[0].len = buf_len;
	ihcd->fmsg_xfr[0].speed_hz = 25000000;

	return 0;

release:
	release_firmware(fw);
out:
	return ret;
}
1491
1492static int ice40_spi_load_fw(struct ice40_hcd *ihcd)
1493{
1494 int ret, i;
1495 struct gpiomux_setting old_setting;
1496
1497 ret = gpio_direction_output(ihcd->reset_gpio, 0);
1498 if (ret < 0) {
1499 pr_err("fail to assert reset %d\n", ret);
1500 goto out;
1501 }
1502
1503 ret = gpio_direction_output(ihcd->vcc_en_gpio, 0);
1504 if (ret < 0) {
1505 pr_err("fail to de-assert vcc_en gpio %d\n", ret);
1506 goto out;
1507 }
1508
1509 /*
1510 * The bridge chip samples the chip select signal during
1511 * power-up. If it is low, it enters SPI slave mode and
1512 * accepts the configuration data from us. The chip
1513 * select signal is managed by the SPI controller driver.
1514 * We temporarily override the chip select config to
1515 * drive it low. The SPI bus needs to be locked down during
1516 * this period to avoid other slave data going to our
1517 * bridge chip.
1518 *
1519 */
1520 spi_bus_lock(ihcd->spi->master);
1521
1522 ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
1523 &slave_select_setting, &old_setting);
1524 if (ret < 0) {
1525 pr_err("fail to select the slave %d\n", ret);
1526 goto out;
1527 }
1528
1529 ret = ice40_spi_power_up(ihcd);
1530 if (ret < 0) {
1531 pr_err("fail to power up the chip\n");
1532 goto out;
1533 }
1534
1535
1536 /*
1537 * The databook says 1200 usec is required before the
1538 * chip becomes ready for the SPI transfer.
1539 */
1540 usleep_range(1200, 1250);
1541
1542 ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
1543 &old_setting, NULL);
1544 if (ret < 0) {
1545 pr_err("fail to de-select the slave %d\n", ret);
1546 goto power_off;
1547 }
1548
1549 ret = spi_sync_locked(ihcd->spi, ihcd->fmsg);
1550
1551 spi_bus_unlock(ihcd->spi->master);
1552
1553 if (ret < 0) {
1554 pr_err("spi write failed\n");
1555 goto power_off;
1556 }
1557
1558 for (i = 0; i < 1000; i++) {
1559 ret = gpio_get_value(ihcd->config_done_gpio);
1560 if (ret) {
1561 pr_debug("config done asserted %d\n", i);
1562 break;
1563 }
1564 udelay(1);
1565 }
1566
1567 if (ret <= 0) {
1568 pr_err("config done not asserted\n");
1569 ret = -ENODEV;
1570 goto power_off;
1571 }
1572
1573 ret = gpio_direction_output(ihcd->reset_gpio, 1);
1574 if (ret < 0) {
1575 pr_err("fail to assert reset %d\n", ret);
1576 goto power_off;
1577 }
1578 udelay(50);
1579
1580 ret = ice40_spi_reg_read(ihcd, XFRST_REG);
1581 pr_debug("XFRST val is %x\n", ret);
1582 if (!(ret & PLLOK)) {
1583 pr_err("The PLL2 is not synchronized\n");
1584 goto power_off;
1585 }
1586
1587 pr_info("Firmware load success\n");
1588
1589 return 0;
1590
1591power_off:
1592 ice40_spi_power_off(ihcd);
1593out:
1594 return ret;
1595}
1596
1597static int ice40_spi_init_regulators(struct ice40_hcd *ihcd)
1598{
1599 int ret;
1600
1601 ihcd->spi_vcc = devm_regulator_get(&ihcd->spi->dev, "spi-vcc");
1602 if (IS_ERR(ihcd->spi_vcc)) {
1603 ret = PTR_ERR(ihcd->spi_vcc);
1604 if (ret != -EPROBE_DEFER)
1605 pr_err("fail to get spi-vcc %d\n", ret);
1606 goto out;
1607 }
1608
1609 ret = regulator_set_voltage(ihcd->spi_vcc, 1800000, 1800000);
1610 if (ret < 0) {
1611 pr_err("fail to set spi-vcc %d\n", ret);
1612 goto out;
1613 }
1614
1615 ihcd->core_vcc = devm_regulator_get(&ihcd->spi->dev, "core-vcc");
1616 if (IS_ERR(ihcd->core_vcc)) {
1617 ret = PTR_ERR(ihcd->core_vcc);
1618 if (ret != -EPROBE_DEFER)
1619 pr_err("fail to get core-vcc %d\n", ret);
1620 goto out;
1621 }
1622
1623 ret = regulator_set_voltage(ihcd->core_vcc, 1200000, 1200000);
1624 if (ret < 0) {
1625 pr_err("fail to set core-vcc %d\n", ret);
1626 goto out;
1627 }
1628
1629 if (!of_get_property(ihcd->spi->dev.of_node, "gpio-supply", NULL))
1630 goto out;
1631
1632 ihcd->gpio_vcc = devm_regulator_get(&ihcd->spi->dev, "gpio");
1633 if (IS_ERR(ihcd->gpio_vcc)) {
1634 ret = PTR_ERR(ihcd->gpio_vcc);
1635 if (ret != -EPROBE_DEFER)
1636 pr_err("fail to get gpio_vcc %d\n", ret);
1637 goto out;
1638 }
1639
1640 ret = regulator_set_voltage(ihcd->gpio_vcc, 1800000, 1800000);
1641 if (ret < 0) {
1642 pr_err("fail to set gpio_vcc %d\n", ret);
1643 goto out;
1644 }
1645
1646out:
1647 return ret;
1648}
1649
1650static int ice40_spi_request_gpios(struct ice40_hcd *ihcd)
1651{
1652 int ret;
1653
1654 ret = devm_gpio_request(&ihcd->spi->dev, ihcd->reset_gpio,
1655 "ice40_reset");
1656 if (ret < 0) {
1657 pr_err("fail to request reset gpio\n");
1658 goto out;
1659 }
1660
1661 ret = devm_gpio_request(&ihcd->spi->dev, ihcd->config_done_gpio,
1662 "ice40_config_done");
1663 if (ret < 0) {
1664 pr_err("fail to request config_done gpio\n");
1665 goto out;
1666 }
1667
1668 ret = devm_gpio_request(&ihcd->spi->dev, ihcd->vcc_en_gpio,
1669 "ice40_vcc_en");
1670 if (ret < 0) {
1671 pr_err("fail to request vcc_en gpio\n");
1672 goto out;
1673 }
1674
1675 if (ihcd->clk_en_gpio) {
1676
1677 ret = devm_gpio_request(&ihcd->spi->dev, ihcd->clk_en_gpio,
1678 "ice40_clk_en");
1679 if (ret < 0)
1680 pr_err("fail to request clk_en gpio\n");
1681 }
1682
1683out:
1684 return ret;
1685}
1686
1687static int
1688ice40_spi_init_one_xfr(struct ice40_hcd *ihcd, enum ice40_xfr_type type)
1689{
1690 struct spi_message **m;
1691 struct spi_transfer **t;
1692 int n;
1693
1694 switch (type) {
1695 case FIRMWARE_XFR:
1696 m = &ihcd->fmsg;
1697 t = &ihcd->fmsg_xfr;
1698 n = 1;
1699 break;
1700 case REG_WRITE_XFR:
1701 m = &ihcd->wmsg;
1702 t = &ihcd->wmsg_xfr;
1703 n = 1;
1704 break;
1705 case REG_READ_XFR:
1706 m = &ihcd->rmsg;
1707 t = &ihcd->rmsg_xfr;
1708 n = 1;
1709 break;
1710 case SETUP_XFR:
1711 m = &ihcd->setup_msg;
1712 t = &ihcd->setup_xfr;
1713 n = 2;
1714 break;
1715 case DATA_IN_XFR:
1716 m = &ihcd->in_msg;
1717 t = &ihcd->in_xfr;
1718 n = 2;
1719 break;
1720 case DATA_OUT_XFR:
1721 m = &ihcd->out_msg;
1722 t = &ihcd->out_xfr;
1723 n = 2;
1724 break;
1725 default:
1726 return -EINVAL;
1727 }
1728
1729 *m = devm_kzalloc(&ihcd->spi->dev, sizeof(**m), GFP_KERNEL);
1730 if (*m == NULL)
1731 goto out;
1732
1733 *t = devm_kzalloc(&ihcd->spi->dev, n * sizeof(**t), GFP_KERNEL);
1734 if (*t == NULL)
1735 goto out;
1736
1737 spi_message_init_with_transfers(*m, *t, n);
1738
1739 return 0;
1740out:
1741 return -ENOMEM;
1742}
1743
1744static int ice40_spi_init_xfrs(struct ice40_hcd *ihcd)
1745{
1746 int ret = -ENOMEM;
1747
1748 ret = ice40_spi_init_one_xfr(ihcd, FIRMWARE_XFR);
1749 if (ret < 0)
1750 goto out;
1751
1752 ret = ice40_spi_init_one_xfr(ihcd, REG_WRITE_XFR);
1753 if (ret < 0)
1754 goto out;
1755
1756 ihcd->w_tx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
1757 if (!ihcd->w_tx_buf)
1758 goto out;
1759
1760 ihcd->w_rx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
1761 if (!ihcd->w_rx_buf)
1762 goto out;
1763
1764 ihcd->wmsg_xfr[0].tx_buf = ihcd->w_tx_buf;
1765 ihcd->wmsg_xfr[0].rx_buf = ihcd->w_rx_buf;
1766 ihcd->wmsg_xfr[0].len = 2;
1767
1768 ret = ice40_spi_init_one_xfr(ihcd, REG_READ_XFR);
1769 if (ret < 0)
1770 goto out;
1771
1772 ihcd->r_tx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
1773 if (!ihcd->r_tx_buf)
1774 goto out;
1775
1776 ihcd->r_rx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
1777 if (!ihcd->r_rx_buf)
1778 goto out;
1779
1780 ihcd->rmsg_xfr[0].tx_buf = ihcd->r_tx_buf;
1781 ihcd->rmsg_xfr[0].rx_buf = ihcd->r_rx_buf;
1782 ihcd->rmsg_xfr[0].len = 3;
1783
1784 ret = ice40_spi_init_one_xfr(ihcd, SETUP_XFR);
1785 if (ret < 0)
1786 goto out;
1787
1788 ihcd->setup_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
1789 if (!ihcd->setup_buf)
1790 goto out;
1791 ihcd->setup_xfr[0].tx_buf = ihcd->setup_buf;
1792 ihcd->setup_xfr[0].len = 1;
1793
1794 ret = ice40_spi_init_one_xfr(ihcd, DATA_IN_XFR);
1795 if (ret < 0)
1796 goto out;
1797 ihcd->in_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
1798 if (!ihcd->in_buf)
1799 goto out;
1800 ihcd->in_xfr[0].tx_buf = ihcd->in_buf;
1801 ihcd->in_xfr[0].len = 2;
1802
1803 ret = ice40_spi_init_one_xfr(ihcd, DATA_OUT_XFR);
1804 if (ret < 0)
1805 goto out;
1806 ihcd->out_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
1807 if (!ihcd->out_buf)
1808 goto out;
1809 ihcd->out_xfr[0].tx_buf = ihcd->out_buf;
1810 ihcd->out_xfr[0].len = 1;
1811
1812 return 0;
1813
1814out:
1815 return -ENOMEM;
1816}
1817
/*
 * Open handler for the debugfs "command" file.  single_open() with a
 * NULL show routine: the file is write-only, seq_file is used only to
 * carry the ihcd pointer (inode->i_private) to the write handler.
 */
static int ice40_dbg_cmd_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, inode->i_private);
}
1822
1823static ssize_t ice40_dbg_cmd_write(struct file *file, const char __user *ubuf,
1824 size_t count, loff_t *ppos)
1825{
1826 struct seq_file *s = file->private_data;
1827 struct ice40_hcd *ihcd = s->private;
1828 char buf[32];
1829 int ret;
1830 u8 status, addr;
1831
1832 memset(buf, 0x00, sizeof(buf));
1833
1834 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
1835 ret = -EFAULT;
1836 goto out;
1837 }
1838
1839 if (!strcmp(buf, "poll")) {
1840 if (!HCD_RH_RUNNING(ihcd->hcd)) {
1841 ret = -EAGAIN;
1842 goto out;
1843 }
1844 /*
1845 * The bridge chip supports interrupt for device
1846 * connect and disconnect. We don;t have a real
1847 * use case of connect/disconnect. This debugfs
1848 * interface provides a way to enumerate the
1849 * attached device.
1850 */
1851 ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
1852 DET_BUS_CTRL, CTRL0_REG);
1853 ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
1854 status = ice40_spi_reg_read(ihcd, XFRST_REG);
1855 if ((status & DPST)) {
1856 ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
1857 ihcd->port_flags |= USB_PORT_STAT_C_CONNECTION << 16;
1858 ihcd->pcd_pending = true;
1859 usb_hcd_poll_rh_status(ihcd->hcd);
1860 } else if (ihcd->port_flags & USB_PORT_STAT_CONNECTION) {
1861 ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
1862 ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
1863 ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
1864 ihcd->pcd_pending = true;
1865 usb_hcd_poll_rh_status(ihcd->hcd);
1866 }
1867 } else if (!strcmp(buf, "rwtest")) {
1868 ihcd->devnum = 1;
1869 ice40_spi_reg_write(ihcd, 0x1, FADDR_REG);
1870 addr = ice40_spi_reg_read(ihcd, FADDR_REG);
1871 pr_info("addr written was 0x1 read as %x\n", addr);
1872 } else if (!strcmp(buf, "force_disconnect")) {
1873 if (!HCD_RH_RUNNING(ihcd->hcd)) {
1874 ret = -EAGAIN;
1875 goto out;
1876 }
1877 /*
1878 * Forcfully disconnect the device. This is required
1879 * for simulating the disconnect on a USB port which
1880 * does not have pull-down resistors.
1881 */
1882 ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
1883 ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
1884 ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
1885 ihcd->pcd_pending = true;
1886 usb_hcd_poll_rh_status(ihcd->hcd);
1887 } else {
1888 ret = -EINVAL;
1889 goto out;
1890 }
1891
1892 ret = count;
1893out:
1894 return ret;
1895}
1896
1897const struct file_operations ice40_dbg_cmd_ops = {
1898 .open = ice40_dbg_cmd_open,
1899 .write = ice40_dbg_cmd_write,
1900 .llseek = seq_lseek,
1901 .release = single_release,
1902};
1903
1904static int ice40_debugfs_init(struct ice40_hcd *ihcd)
1905{
1906 struct dentry *dir;
1907 int ret = 0;
1908
1909 dir = debugfs_create_dir("ice40_hcd", NULL);
1910
1911 if (!dir || IS_ERR(dir)) {
1912 ret = -ENODEV;
1913 goto out;
1914 }
1915
1916 ihcd->dbg_root = dir;
1917
1918 dir = debugfs_create_file("command", S_IWUSR, ihcd->dbg_root, ihcd,
1919 &ice40_dbg_cmd_ops);
1920
1921 if (!dir) {
1922 debugfs_remove_recursive(ihcd->dbg_root);
1923 ihcd->dbg_root = NULL;
1924 ret = -ENODEV;
1925 }
1926
1927out:
1928 return ret;
1929}
1930
1931static int ice40_spi_probe(struct spi_device *spi)
1932{
1933 struct ice40_hcd *ihcd;
1934 int ret;
1935
1936 ihcd = devm_kzalloc(&spi->dev, sizeof(*ihcd), GFP_KERNEL);
1937 if (!ihcd) {
1938 pr_err("fail to allocate ihcd\n");
1939 ret = -ENOMEM;
1940 goto out;
1941 }
1942 ihcd->spi = spi;
1943
1944 ret = ice40_spi_parse_dt(ihcd);
1945 if (ret) {
1946 pr_err("fail to parse dt node\n");
1947 goto out;
1948 }
1949
1950 ret = ice40_spi_init_regulators(ihcd);
1951 if (ret) {
1952 pr_err("fail to init regulators\n");
1953 goto out;
1954 }
1955
1956 ret = ice40_spi_request_gpios(ihcd);
1957 if (ret) {
1958 pr_err("fail to request gpios\n");
1959 goto out;
1960 }
1961
1962 spin_lock_init(&ihcd->lock);
1963 INIT_LIST_HEAD(&ihcd->async_list);
1964 INIT_WORK(&ihcd->async_work, ice40_async_work);
1965 mutex_init(&ihcd->wlock);
1966 mutex_init(&ihcd->rlock);
1967
1968 /*
1969 * Enable all our trace points. Useful in debugging card
1970 * enumeration issues.
1971 */
1972 ret = trace_set_clr_event(__stringify(TRACE_SYSTEM), NULL, 1);
1973 if (ret < 0)
1974 pr_err("fail to enable trace points with %d\n", ret);
1975
1976 ihcd->wq = create_singlethread_workqueue("ice40_wq");
1977 if (!ihcd->wq) {
1978 pr_err("fail to create workqueue\n");
1979 ret = -ENOMEM;
1980 goto destroy_mutex;
1981 }
1982
1983 ret = ice40_spi_init_xfrs(ihcd);
1984 if (ret) {
1985 pr_err("fail to init spi xfrs %d\n", ret);
1986 goto destroy_wq;
1987 }
1988
1989 ret = ice40_spi_cache_fw(ihcd);
1990 if (ret) {
1991 pr_err("fail to cache fw %d\n", ret);
1992 goto destroy_wq;
1993 }
1994
1995 ret = ice40_spi_load_fw(ihcd);
1996 if (ret) {
1997 pr_err("fail to load fw %d\n", ret);
1998 goto destroy_wq;
1999 }
2000
2001 ihcd->hcd = usb_create_hcd(&ice40_hc_driver, &spi->dev, "ice40");
2002 if (!ihcd->hcd) {
2003 pr_err("fail to alloc hcd\n");
2004 ret = -ENOMEM;
2005 goto power_off;
2006 }
2007 *((struct ice40_hcd **) ihcd->hcd->hcd_priv) = ihcd;
2008
2009 ret = usb_add_hcd(ihcd->hcd, 0, 0);
2010
2011 if (ret < 0) {
2012 pr_err("fail to add HCD\n");
2013 goto put_hcd;
2014 }
2015
2016 ice40_debugfs_init(ihcd);
2017
2018 /*
2019 * We manage the power states of the bridge chip
2020 * as part of root hub suspend/resume. We don't
2021 * need to implement any additional runtime PM
2022 * methods.
2023 */
2024 pm_runtime_no_callbacks(&spi->dev);
2025 pm_runtime_set_active(&spi->dev);
2026 pm_runtime_enable(&spi->dev);
2027
2028 /*
2029 * This does not mean bridge chip can wakeup the
2030 * system from sleep. It's activity can prevent
2031 * or abort the system sleep. The device_init_wakeup
2032 * creates the wakeup source for us which we will
2033 * use to control system sleep.
2034 */
2035 device_init_wakeup(&spi->dev, 1);
2036 pm_stay_awake(&spi->dev);
2037
2038 pr_debug("success\n");
2039
2040 return 0;
2041
2042put_hcd:
2043 usb_put_hcd(ihcd->hcd);
2044power_off:
2045 ice40_spi_power_off(ihcd);
2046destroy_wq:
2047 destroy_workqueue(ihcd->wq);
2048destroy_mutex:
2049 mutex_destroy(&ihcd->rlock);
2050 mutex_destroy(&ihcd->wlock);
2051out:
2052 pr_info("ice40_spi_probe failed\n");
2053 return ret;
2054}
2055
/*
 * Unbind: tear down in reverse order of probe.  The bridge chip is
 * left powered off, runtime PM is disabled and the wakeup source is
 * released.
 */
static int ice40_spi_remove(struct spi_device *spi)
{
	struct usb_hcd *hcd = spi_get_drvdata(spi);
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);

	/* safe when dbg_root is NULL (debugfs init failed) */
	debugfs_remove_recursive(ihcd->dbg_root);

	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
	destroy_workqueue(ihcd->wq);
	ice40_spi_power_off(ihcd);

	pm_runtime_disable(&spi->dev);
	pm_relax(&spi->dev);

	return 0;
}
2073
2074static struct of_device_id ice40_spi_of_match_table[] = {
2075 { .compatible = "lattice,ice40-spi-usb", },
2076 {},
2077};
2078
/* SPI driver glue; matched against the DT compatible string above */
static struct spi_driver ice40_spi_driver = {
	.driver = {
		.name = "ice40_spi",
		.owner = THIS_MODULE,
		.of_match_table = ice40_spi_of_match_table,
	},
	.probe = ice40_spi_probe,
	.remove = ice40_spi_remove,
};
2088
2089module_spi_driver(ice40_spi_driver);
2090
2091MODULE_DESCRIPTION("ICE40 FPGA based SPI-USB bridge HCD");
2092MODULE_LICENSE("GPL v2");