/*
2 * ci13xxx_udc.c - MIPS USB IP core family device controller
3 *
4 * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
5 *
6 * Author: David Lopo
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 * Description: MIPS USB IP core family device controller
15 * Currently it only supports IP part number CI13412
16 *
17 * This driver is composed of several blocks:
18 * - HW: hardware interface
19 * - DBG: debug facilities (optional)
20 * - UTIL: utilities
21 * - ISR: interrupts handling
22 * - ENDPT: endpoint operations (Gadget API)
23 * - GADGET: gadget operations (Gadget API)
24 * - BUS: bus glue code, bus abstraction layer
25 *
26 * Compile Options
27 * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
28 * - STALL_IN: non-empty bulk-in pipes cannot be halted
29 * if defined mass storage compliance succeeds but with warnings
30 * => case 4: Hi > Dn
31 * => case 5: Hi > Di
32 * => case 8: Hi <> Do
33 * if undefined usbtest 13 fails
34 * - TRACE: enable function tracing (depends on DEBUG)
35 *
36 * Main Features
37 * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
38 * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
39 * - Normal & LPM support
40 *
41 * USBTEST Report
42 * - OK: 0-12, 13 (STALL_IN defined) & 14
43 * - Not Supported: 15 & 16 (ISO)
44 *
45 * TODO List
46 * - OTG
47 * - Isochronous & Interrupt Traffic
48 * - Handle requests which spawns into several TDs
49 * - GET_STATUS(device) - always reports 0
50 * - Gadget API (majority of optional features)
51 */
52#include <linux/delay.h>
53#include <linux/device.h>
54#include <linux/dmapool.h>
55#include <linux/dma-mapping.h>
56#include <linux/init.h>
57#include <linux/ratelimit.h>
58#include <linux/interrupt.h>
59#include <linux/io.h>
60#include <linux/irq.h>
61#include <linux/kernel.h>
62#include <linux/slab.h>
63#include <linux/module.h>
64#include <linux/pm_runtime.h>
65#include <linux/usb/ch9.h>
66#include <linux/usb/gadget.h>
67#include <linux/usb/otg.h>
68#include <linux/usb/msm_hsusb.h>
69#include <linux/tracepoint.h>
70#include <mach/usb_trace.h>
71#include "ci13xxx_udc.h"
72
/* Turns on streaming. overrides CI13XXX_DISABLE_STREAMING */
static unsigned int streaming;
module_param(streaming, uint, S_IRUGO | S_IWUSR);

/******************************************************************************
 * DEFINE
 *****************************************************************************/

/* sentinel: "no DMA mapping has been established for this request" */
#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
#define USB_MAX_TIMEOUT		25 /* 25msec timeout */
/* re-arm interval for the endpoint prime-failure watchdog timer */
#define EP_PRIME_CHECK_DELAY	(jiffies + msecs_to_jiffies(1000))
#define MAX_PRIME_CHECK_RETRY	3 /* Wait for 3sec for EP prime failure */

/* ctrl register bank access */
static DEFINE_SPINLOCK(udc_lock);
88
/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	/* EP0 OUT: control transfers, fixed max packet size */
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	/* EP0 IN: mirror of the OUT descriptor, opposite direction */
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

/* UDC descriptor (singleton: only one controller instance supported) */
static struct ci13xxx *_udc;
112
/* Interrupt statistics */
#define ISR_MASK   0x1F
static struct {
	u32 test;	/* forced/test interrupts */
	u32 ui;		/* transfer completion interrupts */
	u32 uei;	/* transfer error interrupts */
	u32 pci;	/* port change interrupts */
	u32 uri;	/* bus reset interrupts */
	u32 sli;	/* suspend interrupts */
	u32 none;	/* ISR invoked with nothing pending */
	struct {
		u32 cnt;		/* total handled events */
		u32 buf[ISR_MASK+1];	/* ring of recent event masks */
		u32 idx;		/* next slot in the ring */
	} hndl;
} isr_statistics;
129
130/**
131 * ffs_nr: find first (least significant) bit set
132 * @x: the word to search
133 *
134 * This function returns bit number (instead of position)
135 */
136static int ffs_nr(u32 x)
137{
138 int n = ffs(x);
139
140 return n ? n-1 : 32;
141}
142
/* one faulting USB request captured after an EBI bus error */
struct ci13xxx_ebi_err_entry {
	u32 *usb_req_buf;	/* request buffer address at fault time */
	u32 usb_req_length;	/* request length */
	u32 ep_info;		/* endpoint register snapshot */
	struct ci13xxx_ebi_err_entry *next;	/* singly-linked list */
};

/* controller-level context recorded for an EBI error */
struct ci13xxx_ebi_err_data {
	u32 ebi_err_addr;	/* faulting bus address */
	u32 apkt0;		/* AHB packet info register 0 */
	u32 apkt1;		/* AHB packet info register 1 */
	struct ci13xxx_ebi_err_entry *ebi_err_entry;	/* per-request list */
};
static struct ci13xxx_ebi_err_data *ebi_err_data;
157
/******************************************************************************
 * HW block
 *****************************************************************************/
/* register bank descriptor */
static struct {
	unsigned      lpm;  /* is LPM? (LPM cores use a shifted register map) */
	void __iomem *abs;  /* bus map offset */
	void __iomem *cap;  /* bus map offset + CAP offset + CAP data */
	size_t        size; /* bank size, in 32-bit words */
} hw_bank;

/* MSM specific */
#define ABS_AHBBURST        (0x0090UL)
#define ABS_AHBMODE         (0x0098UL)
/* UDC register map */
#define ABS_CAPLENGTH       (0x100UL)
#define ABS_HCCPARAMS       (0x108UL)
#define ABS_DCCPARAMS       (0x124UL)
#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)
/* offset to CAPLENGTH (addr + data) */
#define CAP_USBCMD          (0x000UL)
#define CAP_USBSTS          (0x004UL)
#define CAP_USBINTR         (0x008UL)
#define CAP_DEVICEADDR      (0x014UL)
#define CAP_ENDPTLISTADDR   (0x018UL)
#define CAP_PORTSC          (0x044UL)
#define CAP_DEVLC           (0x084UL)
#define CAP_ENDPTPIPEID     (0x0BCUL)
/* registers below sit at different offsets on LPM-capable cores */
#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)

#define REMOTE_WAKEUP_DELAY	msecs_to_jiffies(200)

/* maximum number of endpoints: valid only after hw_device_reset() */
static unsigned hw_ep_max;
static void dbg_usb_op_fail(u8 addr, const char *name,
				const struct ci13xxx_ep *mep);
/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX/OUT, non-zero = TX/IN)
 *
 * Returns the register bit for the endpoint: TX endpoints occupy
 * the upper halfword (bits 16..31), RX the lower.
 */
static inline int hw_ep_bit(int num, int dir)
{
	return dir ? num + 16 : num;
}
212
213static int ep_to_bit(int n)
214{
215 int fill = 16 - hw_ep_max / 2;
216
217 if (n >= hw_ep_max / 2)
218 n += fill;
219
220 return n;
221}
222
223/**
224 * hw_aread: reads from register bitfield
225 * @addr: address relative to bus map
226 * @mask: bitfield mask
227 *
228 * This function returns register bitfield data
229 */
230static u32 hw_aread(u32 addr, u32 mask)
231{
232 return ioread32(addr + hw_bank.abs) & mask;
233}
234
235/**
236 * hw_awrite: writes to register bitfield
237 * @addr: address relative to bus map
238 * @mask: bitfield mask
239 * @data: new data
240 */
241static void hw_awrite(u32 addr, u32 mask, u32 data)
242{
243 iowrite32(hw_aread(addr, ~mask) | (data & mask),
244 addr + hw_bank.abs);
245}
246
247/**
248 * hw_cread: reads from register bitfield
249 * @addr: address relative to CAP offset plus content
250 * @mask: bitfield mask
251 *
252 * This function returns register bitfield data
253 */
254static u32 hw_cread(u32 addr, u32 mask)
255{
256 return ioread32(addr + hw_bank.cap) & mask;
257}
258
259/**
260 * hw_cwrite: writes to register bitfield
261 * @addr: address relative to CAP offset plus content
262 * @mask: bitfield mask
263 * @data: new data
264 */
265static void hw_cwrite(u32 addr, u32 mask, u32 data)
266{
267 iowrite32(hw_cread(addr, ~mask) | (data & mask),
268 addr + hw_bank.cap);
269}
270
271/**
272 * hw_ctest_and_clear: tests & clears register bitfield
273 * @addr: address relative to CAP offset plus content
274 * @mask: bitfield mask
275 *
276 * This function returns register bitfield data
277 */
278static u32 hw_ctest_and_clear(u32 addr, u32 mask)
279{
280 u32 reg = hw_cread(addr, mask);
281
282 iowrite32(reg, addr + hw_bank.cap);
283 return reg;
284}
285
286/**
287 * hw_ctest_and_write: tests & writes register bitfield
288 * @addr: address relative to CAP offset plus content
289 * @mask: bitfield mask
290 * @data: new data
291 *
292 * This function returns register bitfield data
293 */
294static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
295{
296 u32 reg = hw_cread(addr, ~0);
297
298 iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
299 return (reg & mask) >> ffs_nr(mask);
300}
301
/**
 * hw_device_init: maps the register banks and caches controller geometry
 * @base: ioremapped controller base address
 *
 * This function returns an error code
 */
static int hw_device_init(void __iomem *base)
{
	u32 reg;

	/* bank is a module variable */
	hw_bank.abs = base;

	/* CAPLENGTH holds the byte offset from the capability registers
	 * to the operational bank; dereference it to locate "cap" */
	hw_bank.cap = hw_bank.abs;
	hw_bank.cap += ABS_CAPLENGTH;
	hw_bank.cap += ioread8(hw_bank.cap);

	/* LPM-capable cores use a shifted register map (see CAP_* macros) */
	reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
	hw_bank.lpm = reg;
	hw_bank.size = hw_bank.cap - hw_bank.abs;
	hw_bank.size += CAP_LAST;
	hw_bank.size /= sizeof(u32);	/* size in 32-bit words */

	/* DEN counts endpoint pairs; we track IN and OUT separately */
	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */

	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
		return -ENODEV;

	/* setup lock mode ? */

	/* ENDPTSETUPSTAT is '0' by default */

	/* HCSPARAMS.bf.ppc SHOULD BE zero for device */

	return 0;
}
/**
 * hw_device_reset: resets chip (execute without interruption)
 * @udc: the controller to reset
 *
 * This function returns an error code
 */
static int hw_device_reset(struct ci13xxx *udc)
{
	int delay_count = 25; /* 250 usec */

	/* should flush & stop before reset */
	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);

	/* controller clears USBCMD_RST itself when the reset completes */
	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
	while (delay_count-- && hw_cread(CAP_USBCMD, USBCMD_RST))
		udelay(10);
	if (delay_count < 0)
		pr_err("USB controller reset failed\n");

	/* let the platform glue reapply PHY/AHB settings lost by the reset */
	if (udc->udc_driver->notify_event)
		udc->udc_driver->notify_event(udc,
			CI13XXX_CONTROLLER_RESET_EVENT);

	/* USBMODE should be configured step by step */
	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM); /* HW >= 2.3 */

	/*
	 * ITC (Interrupt Threshold Control) field is to set the maximum
	 * rate at which the device controller will issue interrupts.
	 * The maximum interrupt interval measured in micro frames.
	 * Valid values are 0, 1, 2, 4, 8, 16, 32, 64. The default value is
	 * 8 micro frames. If CPU can handle interrupts at faster rate, ITC
	 * can be set to lesser value to gain performance.
	 */
	if (udc->udc_driver->nz_itc)
		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK,
			USBCMD_ITC(udc->udc_driver->nz_itc));
	else if (udc->udc_driver->flags & CI13XXX_ZERO_ITC)
		hw_cwrite(CAP_USBCMD, USBCMD_ITC_MASK, USBCMD_ITC(0));

	/* verify the mode switch took effect before proceeding */
	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
		pr_err("cannot enter in device mode");
		pr_err("lpm = %i", hw_bank.lpm);
		return -ENODEV;
	}

	return 0;
}
384
/**
 * hw_device_state: enables/disables interrupts & starts/stops device (execute
 *                  without interruption)
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */
static int hw_device_state(u32 dma)
{
	struct ci13xxx *udc = _udc;

	if (dma) {
		/* streaming can be forced on via the module parameter even
		 * when the platform asks for it to be disabled */
		if (streaming || !(udc->udc_driver->flags &
				CI13XXX_DISABLE_STREAMING))
			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, 0);
		else
			hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);

		/* point the controller at the endpoint queue-head list */
		hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);

		if (udc->udc_driver->notify_event)
			udc->udc_driver->notify_event(udc,
				CI13XXX_CONTROLLER_CONNECT_EVENT);

		/* interrupt, error, port change, reset, sleep/suspend */
		hw_cwrite(CAP_USBINTR, ~0,
			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
		hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
	} else {
		/* stop the controller and mask all interrupts */
		hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
		hw_cwrite(CAP_USBINTR, ~0, 0);
	}
	return 0;
}
419
420static void debug_ept_flush_info(int ep_num, int dir)
421{
422 struct ci13xxx *udc = _udc;
423 struct ci13xxx_ep *mep;
424
425 if (dir)
426 mep = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
427 else
428 mep = &udc->ci13xxx_ep[ep_num];
429
430 pr_err_ratelimited("USB Registers\n");
431 pr_err_ratelimited("USBCMD:%x\n", hw_cread(CAP_USBCMD, ~0));
432 pr_err_ratelimited("USBSTS:%x\n", hw_cread(CAP_USBSTS, ~0));
433 pr_err_ratelimited("ENDPTLISTADDR:%x\n",
434 hw_cread(CAP_ENDPTLISTADDR, ~0));
435 pr_err_ratelimited("PORTSC:%x\n", hw_cread(CAP_PORTSC, ~0));
436 pr_err_ratelimited("USBMODE:%x\n", hw_cread(CAP_USBMODE, ~0));
437 pr_err_ratelimited("ENDPTSTAT:%x\n", hw_cread(CAP_ENDPTSTAT, ~0));
438
439 dbg_usb_op_fail(0xFF, "FLUSHF", mep);
440}
441/**
442 * hw_ep_flush: flush endpoint fifo (execute without interruption)
443 * @num: endpoint number
444 * @dir: endpoint direction
445 *
446 * This function returns an error code
447 */
448static int hw_ep_flush(int num, int dir)
449{
450 ktime_t start, diff;
451 int n = hw_ep_bit(num, dir);
452 struct ci13xxx_ep *mEp = &_udc->ci13xxx_ep[n];
453
454 /* Flush ep0 even when queue is empty */
455 if (_udc->skip_flush || (num && list_empty(&mEp->qh.queue)))
456 return 0;
457
458 start = ktime_get();
459 do {
460 /* flush any pending transfer */
461 hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
462 while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) {
463 cpu_relax();
464 diff = ktime_sub(ktime_get(), start);
465 if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
466 printk_ratelimited(KERN_ERR
467 "%s: Failed to flush ep#%d %s\n",
468 __func__, num,
469 dir ? "IN" : "OUT");
470 debug_ept_flush_info(num, dir);
471 _udc->skip_flush = true;
472 return 0;
473 }
474 }
475 } while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
476
477 return 0;
478}
479
480/**
481 * hw_ep_disable: disables endpoint (execute without interruption)
482 * @num: endpoint number
483 * @dir: endpoint direction
484 *
485 * This function returns an error code
486 */
487static int hw_ep_disable(int num, int dir)
488{
489 hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
490 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
491 return 0;
492}
493
494/**
495 * hw_ep_enable: enables endpoint (execute without interruption)
496 * @num: endpoint number
497 * @dir: endpoint direction
498 * @type: endpoint type
499 *
500 * This function returns an error code
501 */
502static int hw_ep_enable(int num, int dir, int type)
503{
504 u32 mask, data;
505
506 if (dir) {
507 mask = ENDPTCTRL_TXT; /* type */
508 data = type << ffs_nr(mask);
509
510 mask |= ENDPTCTRL_TXS; /* unstall */
511 mask |= ENDPTCTRL_TXR; /* reset data toggle */
512 data |= ENDPTCTRL_TXR;
513 mask |= ENDPTCTRL_TXE; /* enable */
514 data |= ENDPTCTRL_TXE;
515 } else {
516 mask = ENDPTCTRL_RXT; /* type */
517 data = type << ffs_nr(mask);
518
519 mask |= ENDPTCTRL_RXS; /* unstall */
520 mask |= ENDPTCTRL_RXR; /* reset data toggle */
521 data |= ENDPTCTRL_RXR;
522 mask |= ENDPTCTRL_RXE; /* enable */
523 data |= ENDPTCTRL_RXE;
524 }
525 hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
526
527 /* make sure endpoint is enabled before returning */
528 mb();
529
530 return 0;
531}
532
533/**
534 * hw_ep_get_halt: return endpoint halt status
535 * @num: endpoint number
536 * @dir: endpoint direction
537 *
538 * This function returns 1 if endpoint halted
539 */
540static int hw_ep_get_halt(int num, int dir)
541{
542 u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
543
544 return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
545}
546
547/**
548 * hw_test_and_clear_setup_status: test & clear setup status (execute without
549 * interruption)
550 * @n: endpoint number
551 *
552 * This function returns setup status
553 */
554static int hw_test_and_clear_setup_status(int n)
555{
556 n = ep_to_bit(n);
557 return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
558}
559
560/**
561 * hw_ep_prime: primes endpoint (execute without interruption)
562 * @num: endpoint number
563 * @dir: endpoint direction
564 * @is_ctrl: true if control endpoint
565 *
566 * This function returns an error code
567 */
568static int hw_ep_prime(int num, int dir, int is_ctrl)
569{
570 int n = hw_ep_bit(num, dir);
571
572 if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
573 return -EAGAIN;
574
575 hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
576
577 if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
578 return -EAGAIN;
579
580 /* status shoult be tested according with manual but it doesn't work */
581 return 0;
582}
583
/**
 * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
 *                 without interruption)
 * @num: endpoint number
 * @dir: endpoint direction
 * @value: true => stall, false => unstall
 *
 * This function returns an error code
 */
static int hw_ep_set_halt(int num, int dir, int value)
{
	u32 addr, mask_xs, mask_xr;

	if (value != 0 && value != 1)
		return -EINVAL;

	do {
		/* a pending setup transaction takes priority over the halt */
		if (hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
			return 0;

		addr = CAP_ENDPTCTRL + num * sizeof(u32);
		mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
		mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;

		/* data toggle - reserved for EP0 but it's in ESS */
		hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);

	/* retry until the controller reports the requested halt state */
	} while (value != hw_ep_get_halt(num, dir));

	return 0;
}
615
/**
 * hw_intr_clear: disables interrupt & clears interrupt status (execute without
 *                interruption)
 * @n: interrupt bit
 *
 * This function returns an error code
 */
static int hw_intr_clear(int n)
{
	if (n >= REG_BITS)
		return -EINVAL;

	hw_cwrite(CAP_USBINTR, BIT(n), 0);	/* mask the interrupt */
	hw_cwrite(CAP_USBSTS,  BIT(n), BIT(n));	/* ack (write-1-to-clear) */
	return 0;
}
632
/**
 * hw_intr_force: enables interrupt & forces interrupt status (execute without
 *                interruption)
 * @n: interrupt bit
 *
 * This function returns an error code
 */
static int hw_intr_force(int n)
{
	if (n >= REG_BITS)
		return -EINVAL;

	/* open the TESTMODE window only for the forced status write */
	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
	hw_cwrite(CAP_USBINTR,  BIT(n), BIT(n));
	hw_cwrite(CAP_USBSTS,   BIT(n), BIT(n));
	hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
	return 0;
}
651
652/**
653 * hw_is_port_high_speed: test if port is high speed
654 *
655 * This function returns true if high speed port
656 */
657static int hw_port_is_high_speed(void)
658{
659 return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
660 hw_cread(CAP_PORTSC, PORTSC_HSP);
661}
662
663/**
664 * hw_port_test_get: reads port test mode value
665 *
666 * This function returns port test mode value
667 */
668static u8 hw_port_test_get(void)
669{
670 return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
671}
672
673/**
674 * hw_port_test_set: writes port test mode (execute without interruption)
675 * @mode: new value
676 *
677 * This function returns an error code
678 */
679static int hw_port_test_set(u8 mode)
680{
681 const u8 TEST_MODE_MAX = 7;
682
683 if (mode > TEST_MODE_MAX)
684 return -EINVAL;
685
686 hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
687 return 0;
688}
689
/**
 * hw_read_intr_enable: returns interrupt enable register
 *
 * This function returns register data
 */
static u32 hw_read_intr_enable(void)
{
	return hw_cread(CAP_USBINTR, ~0);
}
699
/**
 * hw_read_intr_status: returns interrupt status register
 *
 * This function returns register data
 */
static u32 hw_read_intr_status(void)
{
	return hw_cread(CAP_USBSTS, ~0);
}
709
710/**
711 * hw_register_read: reads all device registers (execute without interruption)
712 * @buf: destination buffer
713 * @size: buffer size
714 *
715 * This function returns number of registers read
716 */
717static size_t hw_register_read(u32 *buf, size_t size)
718{
719 unsigned i;
720
721 if (size > hw_bank.size)
722 size = hw_bank.size;
723
724 for (i = 0; i < size; i++)
725 buf[i] = hw_aread(i * sizeof(u32), ~0);
726
727 return size;
728}
729
730/**
731 * hw_register_write: writes to register
732 * @addr: register address
733 * @data: register value
734 *
735 * This function returns an error code
736 */
737static int hw_register_write(u16 addr, u32 data)
738{
739 /* align */
740 addr /= sizeof(u32);
741
742 if (addr >= hw_bank.size)
743 return -EINVAL;
744
745 /* align */
746 addr *= sizeof(u32);
747
748 hw_awrite(addr, ~0, data);
749 return 0;
750}
751
752/**
753 * hw_test_and_clear_complete: test & clear complete status (execute without
754 * interruption)
755 * @n: endpoint number
756 *
757 * This function returns complete status
758 */
759static int hw_test_and_clear_complete(int n)
760{
761 n = ep_to_bit(n);
762 return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
763}
764
765/**
766 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
767 * without interruption)
768 *
769 * This function returns active interrutps
770 */
771static u32 hw_test_and_clear_intr_active(void)
772{
773 u32 reg = hw_read_intr_status() & hw_read_intr_enable();
774
775 hw_cwrite(CAP_USBSTS, ~0, reg);
776 return reg;
777}
778
/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 *                                interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(void)
{
	/* SUTW = setup tripwire; returns its previous state */
	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
}
789
/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 *                              interruption)
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(void)
{
	/* SUTW = setup tripwire; returns its previous state */
	return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}
800
801/**
802 * hw_usb_set_address: configures USB address (execute without interruption)
803 * @value: new USB address
804 *
805 * This function returns an error code
806 */
807static int hw_usb_set_address(u8 value)
808{
809 /* advance */
810 hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
811 value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
812 return 0;
813}
814
/**
 * hw_usb_reset: restart device after a bus reset (execute without
 *               interruption)
 *
 * This function returns an error code
 */
static int hw_usb_reset(void)
{
	int delay_count = 10; /* 100 usec delay */

	hw_usb_set_address(0);

	/* ESS flushes only at end?!? */
	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);	/* flush all EPs */

	/* clear setup token semaphores */
	hw_cwrite(CAP_ENDPTSETUPSTAT, 0, 0);	/* writes its content */

	/* clear complete status */
	hw_cwrite(CAP_ENDPTCOMPLETE,  0, 0);	/* writes its content */

	/* wait until all bits cleared */
	while (delay_count-- && hw_cread(CAP_ENDPTPRIME, ~0))
		udelay(10);
	if (delay_count < 0)
		pr_err("ENDPTPRIME is not cleared during bus reset\n");

	/* reset all endpoints ? */

	/* reset internal status and wait for further instructions
	   no need to verify the port reset status (ESS does it) */

	return 0;
}
849
/******************************************************************************
 * DBG block
 *****************************************************************************/
/**
 * show_device: prints information about device capabilities and status
 *
 * Check "device.h" for details
 */
static ssize_t show_device(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	struct usb_gadget *gadget = &udc->gadget;
	int n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	/* accumulate one line per gadget field into the sysfs page */
	n += scnprintf(buf + n, PAGE_SIZE - n, "speed             = %d\n",
		       gadget->speed);
	n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed         = %d\n",
		       gadget->max_speed);
	/* TODO: Scheduled for removal in 3.8. */
	n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed      = %d\n",
		       gadget_is_dualspeed(gadget));
	n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg            = %d\n",
		       gadget->is_otg);
	n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral   = %d\n",
		       gadget->is_a_peripheral);
	n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable      = %d\n",
		       gadget->b_hnp_enable);
	n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support     = %d\n",
		       gadget->a_hnp_support);
	n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
		       gadget->a_alt_hnp_support);
	n += scnprintf(buf + n, PAGE_SIZE - n, "name              = %s\n",
		       (gadget->name ? gadget->name : ""));

	return n;
}
static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);
894
895/**
896 * show_driver: prints information about attached gadget (if any)
897 *
898 * Check "device.h" for details
899 */
900static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
901 char *buf)
902{
903 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
904 struct usb_gadget_driver *driver = udc->driver;
905 int n = 0;
906
907 dbg_trace("[%s] %p\n", __func__, buf);
908 if (attr == NULL || buf == NULL) {
909 dev_err(dev, "[%s] EINVAL\n", __func__);
910 return 0;
911 }
912
913 if (driver == NULL)
914 return scnprintf(buf, PAGE_SIZE,
915 "There is no gadget attached!\n");
916
917 n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
918 (driver->function ? driver->function : ""));
919 n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
920 driver->max_speed);
921
922 return n;
923}
924static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);
925
/* Maximum event message length */
#define DBG_DATA_MSG   64UL

/* Maximum event messages (must stay a power of two: indices are masked) */
#define DBG_DATA_MAX   128UL

/* Event buffer descriptor: fixed-size ring of formatted event strings */
static struct {
	char     (buf[DBG_DATA_MAX])[DBG_DATA_MSG];   /* buffer */
	unsigned  idx;   /* index */
	unsigned  tty;   /* print to console? */
	rwlock_t  lck;   /* lock */
} dbg_data = {
	.idx = 0,
	.tty = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};
943
944/**
945 * dbg_dec: decrements debug event index
946 * @idx: buffer index
947 */
948static void dbg_dec(unsigned *idx)
949{
950 *idx = (*idx - 1) & (DBG_DATA_MAX-1);
951}
952
953/**
954 * dbg_inc: increments debug event index
955 * @idx: buffer index
956 */
957static void dbg_inc(unsigned *idx)
958{
959 *idx = (*idx + 1) & (DBG_DATA_MAX-1);
960}
961
962
963static unsigned int ep_addr_txdbg_mask;
964module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR);
965static unsigned int ep_addr_rxdbg_mask;
966module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR);
967
968static int allow_dbg_print(u8 addr)
969{
970 int dir, num;
971
972 /* allow bus wide events */
973 if (addr == 0xff)
974 return 1;
975
976 dir = addr & USB_ENDPOINT_DIR_MASK ? TX : RX;
977 num = addr & ~USB_ENDPOINT_DIR_MASK;
978 num = 1 << num;
979
980 if ((dir == TX) && (num & ep_addr_txdbg_mask))
981 return 1;
982 if ((dir == RX) && (num & ep_addr_rxdbg_mask))
983 return 1;
984
985 return 0;
986}
987
/**
 * dbg_print: prints the common part of the event
 * @addr: endpoint address
 * @name: event name
 * @status: status
 * @extra: extra information
 */
static void dbg_print(u8 addr, const char *name, int status, const char *extra)
{
	struct timeval tval;
	unsigned int stamp;
	unsigned long flags;

	if (!allow_dbg_print(addr))
		return;

	/* writer lock: the ring buffer and its index move together */
	write_lock_irqsave(&dbg_data.lck, flags);

	do_gettimeofday(&tval);
	stamp = tval.tv_sec & 0xFFFF;	/* 2^32 = 4294967296. Limit to 4096s */
	stamp = stamp * 1000000 + tval.tv_usec;

	scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
		  "%04X\t? %02X %-7.7s %4i ?\t%s\n",
		  stamp, addr, name, status, extra);

	dbg_inc(&dbg_data.idx);

	write_unlock_irqrestore(&dbg_data.lck, flags);

	/* optionally mirror the event to the kernel log */
	if (dbg_data.tty != 0)
		pr_notice("%04X\t? %02X %-7.7s %4i ?\t%s\n",
			  stamp, addr, name, status, extra);
}
1022
1023/**
1024 * dbg_done: prints a DONE event
1025 * @addr: endpoint address
1026 * @td: transfer descriptor
1027 * @status: status
1028 */
1029static void dbg_done(u8 addr, const u32 token, int status)
1030{
1031 char msg[DBG_DATA_MSG];
1032
1033 scnprintf(msg, sizeof(msg), "%d %02X",
1034 (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
1035 (int)(token & TD_STATUS) >> ffs_nr(TD_STATUS));
1036 dbg_print(addr, "DONE", status, msg);
1037}
1038
1039/**
1040 * dbg_event: prints a generic event
1041 * @addr: endpoint address
1042 * @name: event name
1043 * @status: status
1044 */
1045static void dbg_event(u8 addr, const char *name, int status)
1046{
1047 if (name != NULL)
1048 dbg_print(addr, name, status, "");
1049}
1050
1051/*
1052 * dbg_queue: prints a QUEUE event
1053 * @addr: endpoint address
1054 * @req: USB request
1055 * @status: status
1056 */
1057static void dbg_queue(u8 addr, const struct usb_request *req, int status)
1058{
1059 char msg[DBG_DATA_MSG];
1060
1061 if (req != NULL) {
1062 scnprintf(msg, sizeof(msg),
1063 "%d %d", !req->no_interrupt, req->length);
1064 dbg_print(addr, "QUEUE", status, msg);
1065 }
1066}
1067
1068/**
1069 * dbg_setup: prints a SETUP event
1070 * @addr: endpoint address
1071 * @req: setup request
1072 */
1073static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
1074{
1075 char msg[DBG_DATA_MSG];
1076
1077 if (req != NULL) {
1078 scnprintf(msg, sizeof(msg),
1079 "%02X %02X %04X %04X %d", req->bRequestType,
1080 req->bRequest, le16_to_cpu(req->wValue),
1081 le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
1082 dbg_print(addr, "SETUP", 0, msg);
1083 }
1084}
1085
/**
 * dbg_usb_op_fail: prints USB Operation FAIL event
 * @addr: endpoint address
 * @name: name of the failed operation
 * @mep: endpoint structure
 *
 * Dumps the endpoint's queue head and every queued request so the
 * hardware state at the time of the failure can be reconstructed.
 */
static void dbg_usb_op_fail(u8 addr, const char *name,
				const struct ci13xxx_ep *mep)
{
	char msg[DBG_DATA_MSG];
	struct ci13xxx_req *req;
	struct list_head *ptr = NULL;

	if (mep != NULL) {
		scnprintf(msg, sizeof(msg),
			"%s Fail EP%d%s QH:%08X",
			name, mep->num,
			mep->dir ? "IN" : "OUT", mep->qh.ptr->cap);
		dbg_print(addr, name, 0, msg);
		/* queue-head snapshot: current TD, next TD, token */
		scnprintf(msg, sizeof(msg),
			"cap:%08X %08X %08X\n",
			mep->qh.ptr->curr, mep->qh.ptr->td.next,
			mep->qh.ptr->td.token);
		dbg_print(addr, "QHEAD", 0, msg);

		/* walk every request still queued on this endpoint */
		list_for_each(ptr, &mep->qh.queue) {
			req = list_entry(ptr, struct ci13xxx_req, queue);
			scnprintf(msg, sizeof(msg),
				"%08X:%08X:%08X\n",
				req->dma, req->ptr->next,
				req->ptr->token);
			dbg_print(addr, "REQ", 0, msg);
			scnprintf(msg, sizeof(msg), "%08X:%d\n",
				req->ptr->page[0],
				req->req.status);
			dbg_print(addr, "REQPAGE", 0, msg);
		}
	}
}
1124
/**
 * show_events: displays the event buffer
 *
 * Check "device.h" for details
 */
static ssize_t show_events(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	unsigned long flags;
	unsigned i, j, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	read_lock_irqsave(&dbg_data.lck, flags);

	/* first pass: walk backwards from the newest entry to find how
	 * many of the most recent messages fit in one PAGE_SIZE */
	i = dbg_data.idx;
	for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
		n += strlen(dbg_data.buf[i]);
		if (n >= PAGE_SIZE) {
			n -= strlen(dbg_data.buf[i]);
			break;
		}
	}
	/* second pass: copy those messages out in chronological order */
	for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
		j += scnprintf(buf + j, PAGE_SIZE - j,
			       "%s", dbg_data.buf[i]);

	read_unlock_irqrestore(&dbg_data.lck, flags);

	return n;
}
1160
1161/**
1162 * store_events: configure if events are going to be also printed to console
1163 *
1164 * Check "device.h" for details
1165 */
1166static ssize_t store_events(struct device *dev, struct device_attribute *attr,
1167 const char *buf, size_t count)
1168{
1169 unsigned tty;
1170
1171 dbg_trace("[%s] %p, %d\n", __func__, buf, count);
1172 if (attr == NULL || buf == NULL) {
1173 dev_err(dev, "[%s] EINVAL\n", __func__);
1174 goto done;
1175 }
1176
1177 if (sscanf(buf, "%u", &tty) != 1 || tty > 1) {
1178 dev_err(dev, "<1|0>: enable|disable console log\n");
1179 goto done;
1180 }
1181
1182 dbg_data.tty = tty;
1183 dev_info(dev, "tty = %u", dbg_data.tty);
1184
1185 done:
1186 return count;
1187}
1188static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events);
1189
/**
 * show_inters: interrupt status, enable status and historic
 *
 * Check "device.h" for details
 */
static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	u32 intr;
	unsigned i, j, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);

	/* live hardware interrupt status/enable registers */
	n += scnprintf(buf + n, PAGE_SIZE - n,
		       "status = %08x\n", hw_read_intr_status());
	n += scnprintf(buf + n, PAGE_SIZE - n,
		       "enable = %08x\n", hw_read_intr_enable());

	/* software ISR statistics: '*' lines are totals, '?' per-source */
	n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
		       isr_statistics.test);
	n += scnprintf(buf + n, PAGE_SIZE - n, "? ui  = %d\n",
		       isr_statistics.ui);
	n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
		       isr_statistics.uei);
	n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
		       isr_statistics.pci);
	n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
		       isr_statistics.uri);
	n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
		       isr_statistics.sli);
	n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
		       isr_statistics.none);
	n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
		       isr_statistics.hndl.cnt);

	/* decode the circular interrupt-history buffer, oldest slot first */
	for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
		i &= ISR_MASK;
		intr = isr_statistics.hndl.buf[i];

		if (USBi_UI  & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "ui  ");
		intr &= ~USBi_UI;
		if (USBi_UEI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
		intr &= ~USBi_UEI;
		if (USBi_PCI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
		intr &= ~USBi_PCI;
		if (USBi_URI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
		intr &= ~USBi_URI;
		if (USBi_SLI & intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
		intr &= ~USBi_SLI;
		/* any bits left after masking are from unknown sources */
		if (intr)
			n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
		/* only terminate lines for slots that recorded something */
		if (isr_statistics.hndl.buf[i])
			n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
	}

	spin_unlock_irqrestore(udc->lock, flags);

	return n;
}
1262
1263/**
1264 * store_inters: enable & force or disable an individual interrutps
1265 * (to be used for test purposes only)
1266 *
1267 * Check "device.h" for details
1268 */
1269static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
1270 const char *buf, size_t count)
1271{
1272 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1273 unsigned long flags;
1274 unsigned en, bit;
1275
1276 dbg_trace("[%s] %p, %d\n", __func__, buf, count);
1277 if (attr == NULL || buf == NULL) {
1278 dev_err(dev, "[%s] EINVAL\n", __func__);
1279 goto done;
1280 }
1281
1282 if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
1283 dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
1284 goto done;
1285 }
1286
1287 spin_lock_irqsave(udc->lock, flags);
1288 if (en) {
1289 if (hw_intr_force(bit))
1290 dev_err(dev, "invalid bit number\n");
1291 else
1292 isr_statistics.test++;
1293 } else {
1294 if (hw_intr_clear(bit))
1295 dev_err(dev, "invalid bit number\n");
1296 }
1297 spin_unlock_irqrestore(udc->lock, flags);
1298
1299 done:
1300 return count;
1301}
1302static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters);
1303
1304/**
1305 * show_port_test: reads port test mode
1306 *
1307 * Check "device.h" for details
1308 */
1309static ssize_t show_port_test(struct device *dev,
1310 struct device_attribute *attr, char *buf)
1311{
1312 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1313 unsigned long flags;
1314 unsigned mode;
1315
1316 dbg_trace("[%s] %p\n", __func__, buf);
1317 if (attr == NULL || buf == NULL) {
1318 dev_err(dev, "[%s] EINVAL\n", __func__);
1319 return 0;
1320 }
1321
1322 spin_lock_irqsave(udc->lock, flags);
1323 mode = hw_port_test_get();
1324 spin_unlock_irqrestore(udc->lock, flags);
1325
1326 return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
1327}
1328
1329/**
1330 * store_port_test: writes port test mode
1331 *
1332 * Check "device.h" for details
1333 */
1334static ssize_t store_port_test(struct device *dev,
1335 struct device_attribute *attr,
1336 const char *buf, size_t count)
1337{
1338 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1339 unsigned long flags;
1340 unsigned mode;
1341
1342 dbg_trace("[%s] %p, %d\n", __func__, buf, count);
1343 if (attr == NULL || buf == NULL) {
1344 dev_err(dev, "[%s] EINVAL\n", __func__);
1345 goto done;
1346 }
1347
1348 if (sscanf(buf, "%u", &mode) != 1) {
1349 dev_err(dev, "<mode>: set port test mode");
1350 goto done;
1351 }
1352
1353 spin_lock_irqsave(udc->lock, flags);
1354 if (hw_port_test_set(mode))
1355 dev_err(dev, "invalid mode\n");
1356 spin_unlock_irqrestore(udc->lock, flags);
1357
1358 done:
1359 return count;
1360}
1361static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
1362 show_port_test, store_port_test);
1363
/**
 * show_qheads: DMA contents of all queue heads
 *
 * Check "device.h" for details
 */
static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	unsigned i, j, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	spin_lock_irqsave(udc->lock, flags);
	/* endpoint array layout: RX in [0, hw_ep_max/2), TX in upper half */
	for (i = 0; i < hw_ep_max/2; i++) {
		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
		n += scnprintf(buf + n, PAGE_SIZE - n,
			       "EP=%02i: RX=%08X TX=%08X\n",
			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
		/* raw word-by-word dump of the RX and TX queue heads */
		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
			n += scnprintf(buf + n, PAGE_SIZE - n,
				       " %04X:    %08X    %08X\n", j,
				       *((u32 *)mEpRx->qh.ptr + j),
				       *((u32 *)mEpTx->qh.ptr + j));
		}
	}
	spin_unlock_irqrestore(udc->lock, flags);

	return n;
}
static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
1401
/**
 * show_registers: dumps all registers
 *
 * Check "device.h" for details
 */
/* number of u32 registers captured per dump */
#define DUMP_ENTRIES	512
static ssize_t show_registers(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
	unsigned long flags;
	u32 *dump;
	unsigned i, k, n = 0;

	dbg_trace("[%s] %p\n", __func__, buf);
	if (attr == NULL || buf == NULL) {
		dev_err(dev, "[%s] EINVAL\n", __func__);
		return 0;
	}

	dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
	if (!dump) {
		dev_err(dev, "%s: out of memory\n", __func__);
		return 0;
	}

	/* snapshot the registers under the lock; format them outside it */
	spin_lock_irqsave(udc->lock, flags);
	k = hw_register_read(dump, DUMP_ENTRIES);
	spin_unlock_irqrestore(udc->lock, flags);

	/* k is the number of registers actually captured */
	for (i = 0; i < k; i++) {
		n += scnprintf(buf + n, PAGE_SIZE - n,
			       "reg[0x%04X] = 0x%08X\n",
			       i * (unsigned)sizeof(u32), dump[i]);
	}
	kfree(dump);

	return n;
}
1441
1442/**
1443 * store_registers: writes value to register address
1444 *
1445 * Check "device.h" for details
1446 */
1447static ssize_t store_registers(struct device *dev,
1448 struct device_attribute *attr,
1449 const char *buf, size_t count)
1450{
1451 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1452 unsigned long addr, data, flags;
1453
1454 dbg_trace("[%s] %p, %d\n", __func__, buf, count);
1455 if (attr == NULL || buf == NULL) {
1456 dev_err(dev, "[%s] EINVAL\n", __func__);
1457 goto done;
1458 }
1459
1460 if (sscanf(buf, "%li %li", &addr, &data) != 2) {
1461 dev_err(dev, "<addr> <data>: write data to register address");
1462 goto done;
1463 }
1464
1465 spin_lock_irqsave(udc->lock, flags);
1466 if (hw_register_write(addr, data))
1467 dev_err(dev, "invalid address range\n");
1468 spin_unlock_irqrestore(udc->lock, flags);
1469
1470 done:
1471 return count;
1472}
1473static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
1474 show_registers, store_registers);
1475
1476/**
1477 * show_requests: DMA contents of all requests currently queued (all endpts)
1478 *
1479 * Check "device.h" for details
1480 */
1481static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
1482 char *buf)
1483{
1484 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1485 unsigned long flags;
1486 struct list_head *ptr = NULL;
1487 struct ci13xxx_req *req = NULL;
1488 unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
1489
1490 dbg_trace("[%s] %p\n", __func__, buf);
1491 if (attr == NULL || buf == NULL) {
1492 dev_err(dev, "[%s] EINVAL\n", __func__);
1493 return 0;
1494 }
1495
1496 spin_lock_irqsave(udc->lock, flags);
1497 for (i = 0; i < hw_ep_max; i++)
1498 list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
1499 {
1500 req = list_entry(ptr, struct ci13xxx_req, queue);
1501
1502 n += scnprintf(buf + n, PAGE_SIZE - n,
1503 "EP=%02i: TD=%08X %s\n",
1504 i % hw_ep_max/2, (u32)req->dma,
1505 ((i < hw_ep_max/2) ? "RX" : "TX"));
1506
1507 for (j = 0; j < qSize; j++)
1508 n += scnprintf(buf + n, PAGE_SIZE - n,
1509 " %04X: %08X\n", j,
1510 *((u32 *)req->ptr + j));
1511 }
1512 spin_unlock_irqrestore(udc->lock, flags);
1513
1514 return n;
1515}
1516static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
1517
1518/* EP# and Direction */
1519static ssize_t prime_ept(struct device *dev,
1520 struct device_attribute *attr,
1521 const char *buf, size_t count)
1522{
1523 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1524 struct ci13xxx_ep *mEp;
1525 unsigned int ep_num, dir;
1526 int n;
1527 struct ci13xxx_req *mReq = NULL;
1528
1529 if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
1530 dev_err(dev, "<ep_num> <dir>: prime the ep");
1531 goto done;
1532 }
1533
1534 if (dir)
1535 mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
1536 else
1537 mEp = &udc->ci13xxx_ep[ep_num];
1538
1539 n = hw_ep_bit(mEp->num, mEp->dir);
1540 mReq = list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue);
1541 mEp->qh.ptr->td.next = mReq->dma;
1542 mEp->qh.ptr->td.token &= ~TD_STATUS;
1543
1544 wmb();
1545
1546 hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
1547 while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
1548 cpu_relax();
1549
1550 pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s\n", __func__,
1551 hw_cread(CAP_ENDPTPRIME, ~0),
1552 hw_cread(CAP_ENDPTSTAT, ~0),
1553 mEp->num, mEp->dir ? "IN" : "OUT");
1554done:
1555 return count;
1556
1557}
1558static DEVICE_ATTR(prime, S_IWUSR, NULL, prime_ept);
1559
1560/* EP# and Direction */
1561static ssize_t print_dtds(struct device *dev,
1562 struct device_attribute *attr,
1563 const char *buf, size_t count)
1564{
1565 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1566 struct ci13xxx_ep *mEp;
1567 unsigned int ep_num, dir;
1568 int n;
1569 struct list_head *ptr = NULL;
1570 struct ci13xxx_req *req = NULL;
1571
1572 if (sscanf(buf, "%u %u", &ep_num, &dir) != 2) {
1573 dev_err(dev, "<ep_num> <dir>: to print dtds");
1574 goto done;
1575 }
1576
1577 if (dir)
1578 mEp = &udc->ci13xxx_ep[ep_num + hw_ep_max/2];
1579 else
1580 mEp = &udc->ci13xxx_ep[ep_num];
1581
1582 n = hw_ep_bit(mEp->num, mEp->dir);
1583 pr_info("%s: prime:%08x stat:%08x ep#%d dir:%s"
1584 "dTD_update_fail_count: %lu "
1585 "mEp->dTD_update_fail_count: %lu"
1586 "mEp->prime_fail_count: %lu\n", __func__,
1587 hw_cread(CAP_ENDPTPRIME, ~0),
1588 hw_cread(CAP_ENDPTSTAT, ~0),
1589 mEp->num, mEp->dir ? "IN" : "OUT",
1590 udc->dTD_update_fail_count,
1591 mEp->dTD_update_fail_count,
1592 mEp->prime_fail_count);
1593
1594 pr_info("QH: cap:%08x cur:%08x next:%08x token:%08x\n",
1595 mEp->qh.ptr->cap, mEp->qh.ptr->curr,
1596 mEp->qh.ptr->td.next, mEp->qh.ptr->td.token);
1597
1598 list_for_each(ptr, &mEp->qh.queue) {
1599 req = list_entry(ptr, struct ci13xxx_req, queue);
1600
1601 pr_info("\treq:%08x next:%08x token:%08x page0:%08x status:%d\n",
1602 req->dma, req->ptr->next, req->ptr->token,
1603 req->ptr->page[0], req->req.status);
1604 }
1605done:
1606 return count;
1607
1608}
1609static DEVICE_ATTR(dtds, S_IWUSR, NULL, print_dtds);
1610
/**
 * ci13xxx_wakeup: initiates remote-wakeup signalling on the bus
 * @_gadget: gadget
 *
 * Returns 0 on success, -EOPNOTSUPP if the host has not enabled the
 * remote-wakeup feature, -EINVAL if the port is not suspended.
 */
static int ci13xxx_wakeup(struct usb_gadget *_gadget)
{
	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
	unsigned long flags;
	int ret = 0;

	trace();

	spin_lock_irqsave(udc->lock, flags);
	if (!udc->remote_wakeup) {
		ret = -EOPNOTSUPP;
		dbg_trace("remote wakeup feature is not enabled\n");
		goto out;
	}
	/* drop the lock: the notify_event callback and the PHY resume
	 * below may block.  NOTE(review): notify_event is called without
	 * a NULL check - assumes the platform always provides it */
	spin_unlock_irqrestore(udc->lock, flags);

	udc->udc_driver->notify_event(udc,
		CI13XXX_CONTROLLER_REMOTE_WAKEUP_EVENT);

	if (udc->transceiver)
		usb_phy_set_suspend(udc->transceiver, 0);

	spin_lock_irqsave(udc->lock, flags);
	/* re-check under the lock: the port may have resumed meanwhile */
	if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
		ret = -EINVAL;
		dbg_trace("port is not suspended\n");
		goto out;
	}
	/* Force Port Resume: drives the remote-wakeup signalling */
	hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
out:
	spin_unlock_irqrestore(udc->lock, flags);
	return ret;
}
1644
1645static void usb_do_remote_wakeup(struct work_struct *w)
1646{
1647 struct ci13xxx *udc = _udc;
1648 unsigned long flags;
1649 bool do_wake;
1650
1651 /*
1652 * This work can not be canceled from interrupt handler. Check
1653 * if wakeup conditions are still met.
1654 */
1655 spin_lock_irqsave(udc->lock, flags);
1656 do_wake = udc->suspended && udc->remote_wakeup;
1657 spin_unlock_irqrestore(udc->lock, flags);
1658
1659 if (do_wake)
1660 ci13xxx_wakeup(&udc->gadget);
1661}
1662
1663static ssize_t usb_remote_wakeup(struct device *dev,
1664 struct device_attribute *attr, const char *buf, size_t count)
1665{
1666 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1667
1668 ci13xxx_wakeup(&udc->gadget);
1669
1670 return count;
1671}
1672static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup);
1673
1674/**
1675 * dbg_create_files: initializes the attribute interface
1676 * @dev: device
1677 *
1678 * This function returns an error code
1679 */
1680__maybe_unused static int dbg_create_files(struct device *dev)
1681{
1682 int retval = 0;
1683
1684 if (dev == NULL)
1685 return -EINVAL;
1686 retval = device_create_file(dev, &dev_attr_device);
1687 if (retval)
1688 goto done;
1689 retval = device_create_file(dev, &dev_attr_driver);
1690 if (retval)
1691 goto rm_device;
1692 retval = device_create_file(dev, &dev_attr_events);
1693 if (retval)
1694 goto rm_driver;
1695 retval = device_create_file(dev, &dev_attr_inters);
1696 if (retval)
1697 goto rm_events;
1698 retval = device_create_file(dev, &dev_attr_port_test);
1699 if (retval)
1700 goto rm_inters;
1701 retval = device_create_file(dev, &dev_attr_qheads);
1702 if (retval)
1703 goto rm_port_test;
1704 retval = device_create_file(dev, &dev_attr_registers);
1705 if (retval)
1706 goto rm_qheads;
1707 retval = device_create_file(dev, &dev_attr_requests);
1708 if (retval)
1709 goto rm_registers;
1710 retval = device_create_file(dev, &dev_attr_wakeup);
1711 if (retval)
1712 goto rm_remote_wakeup;
1713 retval = device_create_file(dev, &dev_attr_prime);
1714 if (retval)
1715 goto rm_prime;
1716 retval = device_create_file(dev, &dev_attr_dtds);
1717 if (retval)
1718 goto rm_dtds;
1719
1720 return 0;
1721
1722rm_dtds:
1723 device_remove_file(dev, &dev_attr_dtds);
1724rm_prime:
1725 device_remove_file(dev, &dev_attr_prime);
1726rm_remote_wakeup:
1727 device_remove_file(dev, &dev_attr_wakeup);
1728 rm_registers:
1729 device_remove_file(dev, &dev_attr_registers);
1730 rm_qheads:
1731 device_remove_file(dev, &dev_attr_qheads);
1732 rm_port_test:
1733 device_remove_file(dev, &dev_attr_port_test);
1734 rm_inters:
1735 device_remove_file(dev, &dev_attr_inters);
1736 rm_events:
1737 device_remove_file(dev, &dev_attr_events);
1738 rm_driver:
1739 device_remove_file(dev, &dev_attr_driver);
1740 rm_device:
1741 device_remove_file(dev, &dev_attr_device);
1742 done:
1743 return retval;
1744}
1745
1746/**
1747 * dbg_remove_files: destroys the attribute interface
1748 * @dev: device
1749 *
1750 * This function returns an error code
1751 */
1752__maybe_unused static int dbg_remove_files(struct device *dev)
1753{
1754 if (dev == NULL)
1755 return -EINVAL;
1756 device_remove_file(dev, &dev_attr_requests);
1757 device_remove_file(dev, &dev_attr_registers);
1758 device_remove_file(dev, &dev_attr_qheads);
1759 device_remove_file(dev, &dev_attr_port_test);
1760 device_remove_file(dev, &dev_attr_inters);
1761 device_remove_file(dev, &dev_attr_events);
1762 device_remove_file(dev, &dev_attr_driver);
1763 device_remove_file(dev, &dev_attr_device);
1764 device_remove_file(dev, &dev_attr_wakeup);
1765 return 0;
1766}
1767
/*
 * dump_usb_info: EBI error callback - snapshots all queued USB requests
 * @ignore: notifier cookie (unused)
 * @ebi_addr: faulting EBI address
 * @ebi_apacket0: EBI address packet word 0
 * @ebi_apacket1: EBI address packet word 1
 *
 * Runs at most once per boot (latched by the static 'count') and stores
 * a linked list of per-request records in the global ebi_err_data for
 * post-mortem inspection.
 */
static void dump_usb_info(void *ignore, unsigned int ebi_addr,
	unsigned int ebi_apacket0, unsigned int ebi_apacket1)
{
	struct ci13xxx *udc = _udc;
	unsigned long flags;
	struct list_head *ptr = NULL;
	struct ci13xxx_req *req = NULL;
	struct ci13xxx_ep *mEp;
	unsigned i;
	struct ci13xxx_ebi_err_entry *temp_dump;
	static int count;	/* one-shot latch: only the first error dumps */
	u32 epdir = 0;

	if (count)
		return;
	count++;

	pr_info("%s: USB EBI error detected\n", __func__);

	/* GFP_ATOMIC: may be called from atomic notifier context */
	ebi_err_data = kmalloc(sizeof(struct ci13xxx_ebi_err_data),
				GFP_ATOMIC);
	if (!ebi_err_data) {
		pr_err("%s: memory alloc failed for ebi_err_data\n", __func__);
		return;
	}

	ebi_err_data->ebi_err_entry = kmalloc(
					sizeof(struct ci13xxx_ebi_err_entry),
					GFP_ATOMIC);
	if (!ebi_err_data->ebi_err_entry) {
		kfree(ebi_err_data);
		pr_err("%s: memory alloc failed for ebi_err_entry\n", __func__);
		return;
	}

	ebi_err_data->ebi_err_addr = ebi_addr;
	ebi_err_data->apkt0 = ebi_apacket0;
	ebi_err_data->apkt1 = ebi_apacket1;

	temp_dump = ebi_err_data->ebi_err_entry;
	pr_info("\n DUMPING USB Requests Information\n");
	spin_lock_irqsave(udc->lock, flags);
	/* one record per request still queued on any endpoint */
	for (i = 0; i < hw_ep_max; i++) {
		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue) {
			mEp = &udc->ci13xxx_ep[i];
			req = list_entry(ptr, struct ci13xxx_req, queue);

			temp_dump->usb_req_buf = req->req.buf;
			temp_dump->usb_req_length = req->req.length;
			epdir = mEp->dir;
			/* bit 15 encodes direction, low bits the EP number */
			temp_dump->ep_info = mEp->num | (epdir << 15);

			/* NOTE(review): one entry is always allocated ahead,
			 * so the final node is left uninitialized and the
			 * chain is never NULL-terminated here - confirm how
			 * the consumer of ebi_err_data detects the end */
			temp_dump->next = kmalloc(
					sizeof(struct ci13xxx_ebi_err_entry),
					GFP_ATOMIC);
			if (!temp_dump->next) {
				pr_err("%s: memory alloc failed\n", __func__);
				spin_unlock_irqrestore(udc->lock, flags);
				return;
			}
			temp_dump = temp_dump->next;
		}
	}
	spin_unlock_irqrestore(udc->lock, flags);
}
1833
1834/******************************************************************************
1835 * UTIL block
1836 *****************************************************************************/
1837/**
1838 * _usb_addr: calculates endpoint address from direction & number
1839 * @ep: endpoint
1840 */
1841static inline u8 _usb_addr(struct ci13xxx_ep *ep)
1842{
1843 return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
1844}
1845
/**
 * ep_prime_timer_func: watchdog that re-checks a pending endpoint prime
 * @data: endpoint (struct ci13xxx_ep *) being watched
 *
 * Armed by _hardware_enqueue() after priming.  If the prime bit is still
 * set with an active head TD after MAX_PRIME_CHECK_RETRY polls, the
 * failure is logged, counted and reported via dbg_usb_op_fail().
 */
static void ep_prime_timer_func(unsigned long data)
{
	struct ci13xxx_ep *mep = (struct ci13xxx_ep *)data;
	struct ci13xxx_req *req;
	struct list_head *ptr = NULL;
	int n = hw_ep_bit(mep->num, mep->dir);
	unsigned long flags;


	spin_lock_irqsave(mep->lock, flags);

	/* nothing to watch while disconnected or bus-suspended */
	if (_udc && (!_udc->vbus_active || _udc->suspended)) {
		pr_debug("ep%d%s prime timer when vbus_active=%d,suspend=%d\n",
			mep->num, mep->dir ? "IN" : "OUT",
			_udc->vbus_active, _udc->suspended);
		goto out;
	}

	/* prime already completed */
	if (!hw_cread(CAP_ENDPTPRIME, BIT(n)))
		goto out;

	if (list_empty(&mep->qh.queue))
		goto out;

	req = list_entry(mep->qh.queue.next, struct ci13xxx_req, queue);

	/* clean speculative fetches before inspecting the TD token */
	mb();
	if (!(TD_STATUS_ACTIVE & req->ptr->token))
		goto out;

	mep->prime_timer_count++;
	if (mep->prime_timer_count == MAX_PRIME_CHECK_RETRY) {
		/* give up: dump the QH and every queued TD for diagnosis */
		mep->prime_timer_count = 0;
		pr_info("ep%d dir:%s QH:cap:%08x cur:%08x next:%08x tkn:%08x\n",
				mep->num, mep->dir ? "IN" : "OUT",
				mep->qh.ptr->cap, mep->qh.ptr->curr,
				mep->qh.ptr->td.next, mep->qh.ptr->td.token);
		list_for_each(ptr, &mep->qh.queue) {
			req = list_entry(ptr, struct ci13xxx_req, queue);
			pr_info("\treq:%08xnext:%08xtkn:%08xpage0:%08xsts:%d\n",
					req->dma, req->ptr->next,
					req->ptr->token, req->ptr->page[0],
					req->req.status);
		}
		dbg_usb_op_fail(0xFF, "PRIMEF", mep);
		mep->prime_fail_count++;
	} else {
		/* still pending: poll again later */
		mod_timer(&mep->prime_timer, EP_PRIME_CHECK_DELAY);
	}

	spin_unlock_irqrestore(mep->lock, flags);
	return;

out:
	mep->prime_timer_count = 0;
	spin_unlock_irqrestore(mep->lock, flags);

}
1904
/**
 * _hardware_enqueue: configures a request at hardware level
 * @mEp:  endpoint
 * @mReq: request
 *
 * This function returns an error code
 */
static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	unsigned i;
	int ret = 0;
	unsigned length = mReq->req.length;
	struct ci13xxx *udc = _udc;

	trace("%p, %p", mEp, mReq);

	/* don't queue twice */
	if (mReq->req.status == -EALREADY)
		return -EALREADY;

	/* -EALREADY marks the request as owned by the controller */
	mReq->req.status = -EALREADY;
	if (length && mReq->req.dma == DMA_ADDR_INVALID) {
		mReq->req.dma = \
			dma_map_single(mEp->device, mReq->req.buf,
				       length, mEp->dir ? DMA_TO_DEVICE :
				       DMA_FROM_DEVICE);
		/* NOTE(review): 0 is used as the mapping-failure sentinel;
		 * dma_mapping_error() would be the canonical check */
		if (mReq->req.dma == 0)
			return -ENOMEM;

		mReq->map = 1;
	}

	/* a max-packet-multiple transfer with 'zero' set needs an extra
	 * zero-length TD to terminate the transfer */
	if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
		mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
					   &mReq->zdma);
		if (mReq->zptr == NULL) {
			/* undo the mapping done above */
			if (mReq->map) {
				dma_unmap_single(mEp->device, mReq->req.dma,
					length, mEp->dir ? DMA_TO_DEVICE :
					DMA_FROM_DEVICE);
				mReq->req.dma = DMA_ADDR_INVALID;
				mReq->map     = 0;
			}
			return -ENOMEM;
		}
		memset(mReq->zptr, 0, sizeof(*mReq->zptr));
		mReq->zptr->next    = TD_TERMINATE;
		mReq->zptr->token   = TD_STATUS_ACTIVE;
		if (!mReq->req.no_interrupt)
			mReq->zptr->token   |= TD_IOC;
	}
	/*
	 * TD configuration
	 * TODO - handle requests which spawns into several TDs
	 */
	memset(mReq->ptr, 0, sizeof(*mReq->ptr));
	mReq->ptr->token    = length << ffs_nr(TD_TOTAL_BYTES);
	mReq->ptr->token   &= TD_TOTAL_BYTES;
	mReq->ptr->token   |= TD_STATUS_ACTIVE;
	if (mReq->zptr) {
		/* chain the zero-length TD behind this one */
		mReq->ptr->next    = mReq->zdma;
	} else {
		mReq->ptr->next    = TD_TERMINATE;
		if (!mReq->req.no_interrupt)
			mReq->ptr->token  |= TD_IOC;
	}

	/* MSM Specific: updating the request as required for
	 * SPS mode. Enable MSM DMA engine acording
	 * to the UDC private data in the request.
	 */
	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
		if (mReq->req.udc_priv & MSM_SPS_MODE) {
			mReq->ptr->token = TD_STATUS_ACTIVE;
			if (mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER)
				mReq->ptr->next = TD_TERMINATE;
			else
				mReq->ptr->next = MSM_ETD_TYPE | mReq->dma;
			if (!mReq->req.no_interrupt)
				mReq->ptr->token |= MSM_ETD_IOC;
		}
		mReq->req.dma = 0;
	}

	mReq->ptr->page[0]  = mReq->req.dma;
	/* fill the remaining 4KiB page pointers of this TD */
	for (i = 1; i < 5; i++)
		mReq->ptr->page[i] = (mReq->req.dma + i * CI13XXX_PAGE_SIZE) &
							~TD_RESERVED_MASK;
	wmb();

	/* Remote Wakeup */
	if (udc->suspended) {
		if (!udc->remote_wakeup) {
			/* host did not allow remote wakeup: can't deliver */
			mReq->req.status = -EAGAIN;
			dev_dbg(mEp->device, "%s: queue failed (suspend) ept #%d\n",
				__func__, mEp->num);
			return -EAGAIN;
		}
		/* resume the PHY and kick the deferred wakeup work */
		usb_phy_set_suspend(udc->transceiver, 0);
		schedule_delayed_work(&udc->rw_work, REMOTE_WAKEUP_DELAY);
	}

	/* queue not empty: append to the last TD and use the ATDTW
	 * tripwire to detect a race with the controller fetching it */
	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReqPrev;
		int n = hw_ep_bit(mEp->num, mEp->dir);
		int tmp_stat;
		ktime_t start, diff;

		mReqPrev = list_entry(mEp->qh.queue.prev,
				struct ci13xxx_req, queue);
		if (mReqPrev->zptr)
			mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
		else
			mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
		wmb();
		/* endpoint still primed: the controller will pick it up */
		if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
			goto done;
		start = ktime_get();
		do {
			hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
			tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
			diff = ktime_sub(ktime_get(), start);
			/* poll for max. 100ms */
			if (ktime_to_ms(diff) > USB_MAX_TIMEOUT) {
				if (hw_cread(CAP_USBCMD, USBCMD_ATDTW))
					break;
				printk_ratelimited(KERN_ERR
					"%s:queue failed ep#%d %s\n",
					__func__, mEp->num, mEp->dir ? "IN" : "OUT");
				return -EAGAIN;
			}
		} while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
		hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
		/* endpoint was active while the tripwire held: done */
		if (tmp_stat)
			goto done;
	}

	/* QH configuration */
	if (!list_empty(&mEp->qh.queue)) {
		struct ci13xxx_req *mReq = \
			list_entry(mEp->qh.queue.next,
				   struct ci13xxx_req, queue);

		if (TD_STATUS_ACTIVE & mReq->ptr->token) {
			/* head request still active: re-point the QH at it */
			mEp->qh.ptr->td.next   = mReq->dma;
			mEp->qh.ptr->td.token &= ~TD_STATUS;
			goto prime;
		}
	}

	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */

	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
		if (mReq->req.udc_priv & MSM_SPS_MODE) {
			mEp->qh.ptr->td.next   |= MSM_ETD_TYPE;
			i = hw_cread(CAP_ENDPTPIPEID +
						 mEp->num * sizeof(u32), ~0);
			/* Read current value of this EPs pipe id */
			i = (mEp->dir == TX) ?
				((i >> MSM_TX_PIPE_ID_OFS) & MSM_PIPE_ID_MASK) :
					(i & MSM_PIPE_ID_MASK);
			/* If requested pipe id is different from current,
			   then write it */
			if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) {
				if (mEp->dir == TX)
					hw_cwrite(
						CAP_ENDPTPIPEID +
							mEp->num * sizeof(u32),
						MSM_PIPE_ID_MASK <<
							MSM_TX_PIPE_ID_OFS,
						(mReq->req.udc_priv &
						 MSM_PIPE_ID_MASK)
							<< MSM_TX_PIPE_ID_OFS);
				else
					hw_cwrite(
						CAP_ENDPTPIPEID +
							mEp->num * sizeof(u32),
						MSM_PIPE_ID_MASK,
						mReq->req.udc_priv &
							MSM_PIPE_ID_MASK);
			}
		}
	}

	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
	mEp->qh.ptr->cap |=  QH_ZLT;

prime:
	wmb();   /* synchronize before ep prime */

	ret = hw_ep_prime(mEp->num, mEp->dir,
			   mEp->type == USB_ENDPOINT_XFER_CONTROL);
	if (!ret)
		/* watchdog: verify the prime actually takes effect */
		mod_timer(&mEp->prime_timer, EP_PRIME_CHECK_DELAY);
done:
	return ret;
}
2102
/**
 * _hardware_dequeue: handles a request at hardware level
 * @mEp:  endpoint
 * @mReq: request
 *
 * Returns the number of bytes actually transferred, -EINVAL if the
 * request was never handed to the hardware, or -EBUSY if the controller
 * is still processing it.
 */
static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	trace("%p, %p", mEp, mReq);

	/* only requests marked in-flight by _hardware_enqueue() qualify */
	if (mReq->req.status != -EALREADY)
		return -EINVAL;

	/* clean speculative fetches on req->ptr->token */
	mb();

	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
		return -EBUSY;

	/* MSM SPS finite transfers are completed through another path */
	if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID)
		if ((mReq->req.udc_priv & MSM_SPS_MODE) &&
			(mReq->req.udc_priv & MSM_IS_FINITE_TRANSFER))
			return -EBUSY;
	if (mReq->zptr) {
		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
			return -EBUSY;

		/* The controller may access this dTD one more time.
		 * Defer freeing this to next zero length dTD completion.
		 * It is safe to assume that controller will no longer
		 * access the previous dTD after next dTD completion.
		 */
		if (mEp->last_zptr)
			dma_pool_free(mEp->td_pool, mEp->last_zptr,
					mEp->last_zdma);
		mEp->last_zptr = mReq->zptr;
		mEp->last_zdma = mReq->zdma;

		mReq->zptr = NULL;
	}

	/* dead store: overwritten by the token-derived status below */
	mReq->req.status = 0;

	if (mReq->map) {
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->map     = 0;
	}

	/* translate the TD status bits into a single error indication */
	mReq->req.status = mReq->ptr->token & TD_STATUS;
	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;

	/* bytes transferred = requested length - residue left in token */
	mReq->req.actual   = mReq->ptr->token & TD_TOTAL_BYTES;
	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
	mReq->req.actual   = mReq->req.length - mReq->req.actual;
	mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;

	return mReq->req.actual;
}
2169
2170/**
2171 * restore_original_req: Restore original req's attributes
2172 * @mReq: Request
2173 *
2174 * This function restores original req's attributes. Call
2175 * this function before completing the large req (>16K).
2176 */
2177static void restore_original_req(struct ci13xxx_req *mReq)
2178{
2179 mReq->req.buf = mReq->multi.buf;
2180 mReq->req.length = mReq->multi.len;
2181 if (!mReq->req.status)
2182 mReq->req.actual = mReq->multi.actual;
2183
2184 mReq->multi.len = 0;
2185 mReq->multi.actual = 0;
2186 mReq->multi.buf = NULL;
2187}
2188
/**
 * _ep_nuke: dequeues all endpoint requests
 * @mEp: endpoint
 *
 * This function returns an error code
 * Caller must hold lock (it is temporarily released around each
 * request's completion callback)
 */
static int _ep_nuke(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_ep *mEpTemp = mEp;
	unsigned val;

	trace("%p", mEp);

	if (mEp == NULL)
		return -EINVAL;

	/* stop the prime watchdog before tearing the queue down */
	del_timer(&mEp->prime_timer);
	mEp->prime_timer_count = 0;

	hw_ep_flush(mEp->num, mEp->dir);

	while (!list_empty(&mEp->qh.queue)) {

		/* pop oldest request */
		struct ci13xxx_req *mReq = \
			list_entry(mEp->qh.queue.next,
				   struct ci13xxx_req, queue);
		list_del_init(&mReq->queue);

		/* MSM Specific: Clear end point specific register */
		if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) {
			if (mReq->req.udc_priv & MSM_SPS_MODE) {
				val = hw_cread(CAP_ENDPTPIPEID +
					mEp->num * sizeof(u32),
					~0);

				if (val != MSM_EP_PIPE_ID_RESET_VAL)
					hw_cwrite(
						CAP_ENDPTPIPEID +
						 mEp->num * sizeof(u32),
						~0, MSM_EP_PIPE_ID_RESET_VAL);
			}
		}
		mReq->req.status = -ESHUTDOWN;

		if (mReq->map) {
			dma_unmap_single(mEp->device, mReq->req.dma,
				mReq->req.length,
				mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
			mReq->req.dma = DMA_ADDR_INVALID;
			mReq->map     = 0;
		}

		/* a split large (>16K) request: hand the caller back the
		 * original buffer/length before completing it */
		if (mEp->multi_req) {
			restore_original_req(mReq);
			mEp->multi_req = false;
		}

		if (mReq->req.complete != NULL) {
			/* drop the lock across the completion callback;
			 * control data stages complete on ep0in */
			spin_unlock(mEp->lock);
			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
				mReq->req.length)
				mEpTemp = &_udc->ep0in;
			mReq->req.complete(&mEpTemp->ep, &mReq->req);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
				mReq->req.complete = NULL;
			spin_lock(mEp->lock);
		}
	}
	return 0;
}
2263
2264/**
2265 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
2266 * @gadget: gadget
2267 *
2268 * This function returns an error code
2269 */
2270static int _gadget_stop_activity(struct usb_gadget *gadget)
2271{
2272 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
2273 unsigned long flags;
2274
2275 trace("%p", gadget);
2276
2277 if (gadget == NULL)
2278 return -EINVAL;
2279
2280 spin_lock_irqsave(udc->lock, flags);
2281 udc->gadget.speed = USB_SPEED_UNKNOWN;
2282 udc->remote_wakeup = 0;
2283 udc->suspended = 0;
2284 udc->configured = 0;
2285 spin_unlock_irqrestore(udc->lock, flags);
2286
2287 gadget->b_hnp_enable = 0;
2288 gadget->a_hnp_support = 0;
2289 gadget->host_request = 0;
2290 gadget->otg_srp_reqd = 0;
2291
2292 udc->driver->disconnect(gadget);
2293
2294 spin_lock_irqsave(udc->lock, flags);
2295 _ep_nuke(&udc->ep0out);
2296 _ep_nuke(&udc->ep0in);
2297 spin_unlock_irqrestore(udc->lock, flags);
2298
2299 if (udc->ep0in.last_zptr) {
2300 dma_pool_free(udc->ep0in.td_pool, udc->ep0in.last_zptr,
2301 udc->ep0in.last_zdma);
2302 udc->ep0in.last_zptr = NULL;
2303 }
2304
2305 return 0;
2306}
2307
2308/******************************************************************************
2309 * ISR block
2310 *****************************************************************************/
2311/**
2312 * isr_reset_handler: USB reset interrupt handler
2313 * @udc: UDC device
2314 *
2315 * This function resets USB engine after a bus reset occurred
2316 */
2317static void isr_reset_handler(struct ci13xxx *udc)
2318__releases(udc->lock)
2319__acquires(udc->lock)
2320{
2321 int retval;
2322
2323 trace("%p", udc);
2324
2325 if (udc == NULL) {
2326 err("EINVAL");
2327 return;
2328 }
2329
2330 dbg_event(0xFF, "BUS RST", 0);
2331
2332 spin_unlock(udc->lock);
2333
2334 if (udc->suspended) {
2335 if (udc->udc_driver->notify_event)
2336 udc->udc_driver->notify_event(udc,
2337 CI13XXX_CONTROLLER_RESUME_EVENT);
2338 if (udc->transceiver)
2339 usb_phy_set_suspend(udc->transceiver, 0);
2340 udc->driver->resume(&udc->gadget);
2341 udc->suspended = 0;
2342 }
2343
2344 /*stop charging upon reset */
2345 if (udc->transceiver)
2346 usb_phy_set_power(udc->transceiver, 100);
2347
2348 retval = _gadget_stop_activity(&udc->gadget);
2349 if (retval)
2350 goto done;
2351
2352 _udc->skip_flush = false;
2353 retval = hw_usb_reset();
2354 if (retval)
2355 goto done;
2356
2357 spin_lock(udc->lock);
2358
2359 done:
2360 if (retval)
2361 err("error: %i", retval);
2362}
2363
2364/**
2365 * isr_resume_handler: USB PCI interrupt handler
2366 * @udc: UDC device
2367 *
2368 */
2369static void isr_resume_handler(struct ci13xxx *udc)
2370{
2371 udc->gadget.speed = hw_port_is_high_speed() ?
2372 USB_SPEED_HIGH : USB_SPEED_FULL;
2373 if (udc->suspended) {
2374 spin_unlock(udc->lock);
2375 if (udc->udc_driver->notify_event)
2376 udc->udc_driver->notify_event(udc,
2377 CI13XXX_CONTROLLER_RESUME_EVENT);
2378 if (udc->transceiver)
2379 usb_phy_set_suspend(udc->transceiver, 0);
2380 udc->driver->resume(&udc->gadget);
2381 spin_lock(udc->lock);
2382 udc->suspended = 0;
2383 }
2384}
2385
2386/**
 * isr_suspend_handler: USB SLI interrupt handler
2388 * @udc: UDC device
2389 *
2390 */
2391static void isr_suspend_handler(struct ci13xxx *udc)
2392{
2393 if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
2394 udc->vbus_active) {
2395 if (udc->suspended == 0) {
2396 spin_unlock(udc->lock);
2397 udc->driver->suspend(&udc->gadget);
2398 if (udc->udc_driver->notify_event)
2399 udc->udc_driver->notify_event(udc,
2400 CI13XXX_CONTROLLER_SUSPEND_EVENT);
2401 if (udc->transceiver)
2402 usb_phy_set_suspend(udc->transceiver, 1);
2403 spin_lock(udc->lock);
2404 udc->suspended = 1;
2405 }
2406 }
2407}
2408
2409/**
2410 * isr_get_status_complete: get_status request complete function
2411 * @ep: endpoint
2412 * @req: request handled
2413 *
2414 * Caller must release lock
2415 */
2416static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
2417{
2418 trace("%p, %p", ep, req);
2419
2420 if (ep == NULL || req == NULL) {
2421 err("EINVAL");
2422 return;
2423 }
2424
2425 if (req->status)
2426 err("GET_STATUS failed");
2427}
2428
2429/**
2430 * isr_get_status_response: get_status request response
2431 * @udc: udc struct
2432 * @setup: setup request packet
2433 *
2434 * This function returns an error code
2435 */
static int isr_get_status_response(struct ci13xxx *udc,
				   struct usb_ctrlrequest *setup)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_ep *mEp = &udc->ep0in;
	struct usb_request *req = udc->status;
	int dir, num, retval;

	trace("%p, %p", mEp, setup);

	if (mEp == NULL || setup == NULL)
		return -EINVAL;

	/* Reuse the pre-allocated status request to carry the 2-byte reply */
	req->complete = isr_get_status_complete;
	req->length = 2;
	req->buf = udc->status_buf;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		if (setup->wIndex == OTG_STATUS_SELECTOR) {
			/* OTG status: a single byte carrying the HNP flag */
			*((u8 *)req->buf) = _udc->gadget.host_request <<
						HOST_REQUEST_FLAG;
			req->length = 1;
		} else {
			/* Assume that device is bus powered for now. */
			*((u16 *)req->buf) = _udc->remote_wakeup << 1;
		}
		/* TODO: D1 - Remote Wakeup; D0 - Self Powered */
		retval = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK) \
		   == USB_RECIP_ENDPOINT) {
		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
			TX : RX;
		num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
		/* NOTE(review): reply is stored in CPU byte order here;
		 * presumably should be cpu_to_le16 — confirm on BE builds */
		*((u16 *)req->buf) = hw_ep_get_halt(num, dir);
	}
	/* else do nothing; reserved for future use */

	/* The GET_STATUS reply always goes out on ep0-IN; queue unlocked */
	spin_unlock(mEp->lock);
	retval = usb_ep_queue(&mEp->ep, req, GFP_ATOMIC);
	spin_lock(mEp->lock);
	return retval;
}
2479
2480/**
2481 * isr_setup_status_complete: setup_status request complete function
2482 * @ep: endpoint
2483 * @req: request handled
2484 *
2485 * Caller must release lock. Put the port in test mode if test mode
2486 * feature is selected.
2487 */
2488static void
2489isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
2490{
2491 struct ci13xxx *udc = req->context;
2492 unsigned long flags;
2493
2494 trace("%p, %p", ep, req);
2495
2496 spin_lock_irqsave(udc->lock, flags);
2497 if (udc->test_mode)
2498 hw_port_test_set(udc->test_mode);
2499 spin_unlock_irqrestore(udc->lock, flags);
2500}
2501
2502/**
 * isr_setup_status_phase: queues the status phase of a setup transaction
2504 * @udc: udc struct
2505 *
2506 * This function returns an error code
2507 */
2508static int isr_setup_status_phase(struct ci13xxx *udc)
2509__releases(mEp->lock)
2510__acquires(mEp->lock)
2511{
2512 int retval;
2513 struct ci13xxx_ep *mEp;
2514
2515 trace("%p", udc);
2516
2517 mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
2518 udc->status->context = udc;
2519 udc->status->complete = isr_setup_status_complete;
2520 udc->status->length = 0;
2521
2522 spin_unlock(mEp->lock);
2523 retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
2524 spin_lock(mEp->lock);
2525
2526 return retval;
2527}
2528
2529/**
2530 * isr_tr_complete_low: transaction complete low level handler
2531 * @mEp: endpoint
2532 *
2533 * This function returns an error code
2534 * Caller must hold lock
2535 */
static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
__releases(mEp->lock)
__acquires(mEp->lock)
{
	struct ci13xxx_req *mReq, *mReqTemp;
	struct ci13xxx_ep *mEpTemp = mEp;
	int uninitialized_var(retval);
	int req_dequeue = 1;
	struct ci13xxx *udc = _udc;

	trace("%p", mEp);

	if (list_empty(&mEp->qh.queue))
		return 0;

	/* A completion arrived, so the prime watchdog can be stopped */
	del_timer(&mEp->prime_timer);
	mEp->prime_timer_count = 0;
	list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
			queue) {
dequeue:
		retval = _hardware_dequeue(mEp, mReq);
		if (retval < 0) {
			/*
			 * FIXME: don't know exact delay
			 * required for HW to update dTD status
			 * bits. This is a temporary workaround till
			 * HW designers come back on this.
			 */
			if (retval == -EBUSY && req_dequeue &&
				(mEp->dir == 0 || mEp->num == 0)) {
				/* retry once after a short settle delay */
				req_dequeue = 0;
				udc->dTD_update_fail_count++;
				mEp->dTD_update_fail_count++;
				udelay(10);
				goto dequeue;
			}
			break;
		}
		req_dequeue = 0;

		if (mEp->multi_req) { /* Large request in progress */
			unsigned remain_len;

			/* Accumulate progress of the split-up request */
			mReq->multi.actual += mReq->req.actual;
			remain_len = mReq->multi.len - mReq->multi.actual;
			if (mReq->req.status || !remain_len ||
				(mReq->req.actual != mReq->req.length)) {
				/* error, finished, or short packet:
				 * complete the original request */
				restore_original_req(mReq);
				mEp->multi_req = false;
			} else {
				/* re-queue the next <=16K slice in place */
				mReq->req.buf = mReq->multi.buf +
						mReq->multi.actual;
				mReq->req.length = min_t(unsigned, remain_len,
						(4 * CI13XXX_PAGE_SIZE));

				mReq->req.status = -EINPROGRESS;
				mReq->req.actual = 0;
				list_del_init(&mReq->queue);
				retval = _hardware_enqueue(mEp, mReq);
				if (retval) {
					err("Large req failed in middle");
					mReq->req.status = retval;
					restore_original_req(mReq);
					mEp->multi_req = false;
					goto done;
				} else {
					list_add_tail(&mReq->queue,
						&mEp->qh.queue);
					return 0;
				}
			}
		}
		list_del_init(&mReq->queue);
done:

		dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);

		if (mReq->req.complete != NULL) {
			/* drop the lock while calling back into the gadget;
			 * ep0 data-stage requests complete on the IN side */
			spin_unlock(mEp->lock);
			if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
					mReq->req.length)
				mEpTemp = &_udc->ep0in;
			mReq->req.complete(&mEpTemp->ep, &mReq->req);
			spin_lock(mEp->lock);
		}
	}

	/* -EBUSY just means the remaining TDs are still in flight */
	if (retval == -EBUSY)
		retval = 0;
	if (retval < 0)
		dbg_event(_usb_addr(mEp), "DONE", retval);

	return retval;
}
2630
2631/**
2632 * isr_tr_complete_handler: transaction complete interrupt handler
2633 * @udc: UDC descriptor
2634 *
2635 * This function handles traffic events
2636 */
static void isr_tr_complete_handler(struct ci13xxx *udc)
__releases(udc->lock)
__acquires(udc->lock)
{
	unsigned i;
	u8 tmode = 0;

	trace("%p", udc);

	if (udc == NULL) {
		err("EINVAL");
		return;
	}

	for (i = 0; i < hw_ep_max; i++) {
		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
		int type, num, dir, err = -EINVAL;
		struct usb_ctrlrequest req;

		if (mEp->desc == NULL)
			continue;   /* not configured */

		/* Retire completed TDs on this endpoint, if any */
		if (hw_test_and_clear_complete(i)) {
			err = isr_tr_complete_low(mEp);
			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
				if (err > 0)   /* needs status phase */
					err = isr_setup_status_phase(udc);
				if (err < 0) {
					dbg_event(_usb_addr(mEp),
						  "ERROR", err);
					/* protocol stall on failure */
					spin_unlock(udc->lock);
					if (usb_ep_set_halt(&mEp->ep))
						err("error: ep_set_halt");
					spin_lock(udc->lock);
				}
			}
		}

		/* The rest of the loop handles SETUP packets on ep0 only */
		if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
		    !hw_test_and_clear_setup_status(i))
			continue;

		if (i != 0) {
			warn("ctrl traffic received at endpoint");
			continue;
		}

		/*
		 * Flush data and handshake transactions of previous
		 * setup packet.
		 */
		_ep_nuke(&udc->ep0out);
		_ep_nuke(&udc->ep0in);

		/* read_setup_packet */
		do {
			hw_test_and_set_setup_guard();
			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
			/* Ensure buffer is read before acknowledging to h/w */
			mb();
		} while (!hw_test_and_clear_setup_guard());

		type = req.bRequestType;

		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;

		dbg_setup(_usb_addr(mEp), &req);

		/* Standard requests handled here; others go to "delegate" */
		switch (req.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
					le16_to_cpu(req.wValue) ==
					USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num  = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += hw_ep_max/2;
				/* a wedged endpoint ignores CLEAR_HALT */
				if (!udc->ci13xxx_ep[num].wedge) {
					spin_unlock(udc->lock);
					err = usb_ep_clear_halt(
						&udc->ci13xxx_ep[num].ep);
					spin_lock(udc->lock);
					if (err)
						break;
				}
				err = isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
					le16_to_cpu(req.wValue) ==
					USB_DEVICE_REMOTE_WAKEUP) {
				if (req.wLength != 0)
					break;
				udc->remote_wakeup = 0;
				err = isr_setup_status_phase(udc);
			} else {
				goto delegate;
			}
			break;
		case USB_REQ_GET_STATUS:
			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
			    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
			    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;
			if (le16_to_cpu(req.wValue)  != 0)
				break;
			err = isr_get_status_response(udc, &req);
			break;
		case USB_REQ_SET_ADDRESS:
			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
				goto delegate;
			if (le16_to_cpu(req.wLength) != 0 ||
			    le16_to_cpu(req.wIndex)  != 0)
				break;
			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
			if (err)
				break;
			err = isr_setup_status_phase(udc);
			break;
		case USB_REQ_SET_CONFIGURATION:
			/* record configured state, then let the gadget
			 * driver actually process the request */
			if (type == (USB_DIR_OUT|USB_TYPE_STANDARD))
				udc->configured = !!req.wValue;
			goto delegate;
		case USB_REQ_SET_FEATURE:
			if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
					le16_to_cpu(req.wValue) ==
					USB_ENDPOINT_HALT) {
				if (req.wLength != 0)
					break;
				num  = le16_to_cpu(req.wIndex);
				dir = num & USB_ENDPOINT_DIR_MASK;
				num &= USB_ENDPOINT_NUMBER_MASK;
				if (dir) /* TX */
					num += hw_ep_max/2;

				spin_unlock(udc->lock);
				err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
				spin_lock(udc->lock);
				if (!err)
					isr_setup_status_phase(udc);
			} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
				if (req.wLength != 0)
					break;
				switch (le16_to_cpu(req.wValue)) {
				case USB_DEVICE_REMOTE_WAKEUP:
					udc->remote_wakeup = 1;
					err = isr_setup_status_phase(udc);
					break;
				case USB_DEVICE_B_HNP_ENABLE:
					udc->gadget.b_hnp_enable = 1;
					err = isr_setup_status_phase(udc);
					break;
				case USB_DEVICE_A_HNP_SUPPORT:
					udc->gadget.a_hnp_support = 1;
					err = isr_setup_status_phase(udc);
					break;
				case USB_DEVICE_A_ALT_HNP_SUPPORT:
					break;
				case USB_DEVICE_TEST_MODE:
					/* test selector is the high byte;
					 * mode is applied after the status
					 * stage completes */
					tmode = le16_to_cpu(req.wIndex) >> 8;
					switch (tmode) {
					case TEST_J:
					case TEST_K:
					case TEST_SE0_NAK:
					case TEST_PACKET:
					case TEST_FORCE_EN:
						udc->test_mode = tmode;
						err = isr_setup_status_phase(
								udc);
						break;
					case TEST_OTG_SRP_REQD:
						udc->gadget.otg_srp_reqd = 1;
						err = isr_setup_status_phase(
								udc);
						break;
					case TEST_OTG_HNP_REQD:
						udc->gadget.host_request = 1;
						err = isr_setup_status_phase(
								udc);
						break;
					default:
						break;
					}
					/* falls through to default: no
					 * further action needed */
				default:
					break;
				}
			} else {
				goto delegate;
			}
			break;
		default:
delegate:
			if (req.wLength == 0)   /* no data phase */
				udc->ep0_dir = TX;

			/* hand the request to the gadget driver, unlocked */
			spin_unlock(udc->lock);
			err = udc->driver->setup(&udc->gadget, &req);
			spin_lock(udc->lock);
			break;
		}

		if (err < 0) {
			dbg_event(_usb_addr(mEp), "ERROR", err);

			/* protocol stall: reject the failed setup request */
			spin_unlock(udc->lock);
			if (usb_ep_set_halt(&mEp->ep))
				err("error: ep_set_halt");
			spin_lock(udc->lock);
		}
	}
}
2849
2850/******************************************************************************
2851 * ENDPT block
2852 *****************************************************************************/
2853/**
2854 * ep_enable: configure endpoint, making it usable
2855 *
2856 * Check usb_ep_enable() at "usb_gadget.h" for details
2857 */
static int ep_enable(struct usb_ep *ep,
		     const struct usb_endpoint_descriptor *desc)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int retval = 0;
	unsigned long flags;
	unsigned mult = 0;

	trace("ep = %p, desc = %p", ep, desc);

	if (ep == NULL || desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

	/* only internal SW should enable ctrl endpts */

	mEp->desc = desc;

	if (!list_empty(&mEp->qh.queue))
		warn("enabling a non-empty endpoint!");

	/* cache direction, number, type and maxpacket from the descriptor */
	mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
	mEp->num = usb_endpoint_num(desc);
	mEp->type = usb_endpoint_type(desc);

	mEp->ep.maxpacket = usb_endpoint_maxp(desc);

	dbg_event(_usb_addr(mEp), "ENABLE", 0);

	/* compose the queue-head capabilities field from scratch */
	mEp->qh.ptr->cap = 0;

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		/* interrupt-on-setup for control endpoints */
		mEp->qh.ptr->cap |= QH_IOS;
	} else if (mEp->type == USB_ENDPOINT_XFER_ISOC) {
		/* ISO: program the high-bandwidth multiplier field */
		mEp->qh.ptr->cap &= ~QH_MULT;
		mult = ((mEp->ep.maxpacket >> QH_MULT_SHIFT) + 1) & 0x03;
		mEp->qh.ptr->cap |= (mult << ffs_nr(QH_MULT));
	} else {
		/* bulk/interrupt: zero-length-termination select */
		mEp->qh.ptr->cap |= QH_ZLT;
	}

	mEp->qh.ptr->cap |=
		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */

	/* complete all the updates to ept->head before enabling endpoint*/
	mb();

	/*
	 * Enable endpoints in the HW other than ep0 as ep0
	 * is always enabled
	 */
	if (mEp->num)
		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
2917
2918/**
2919 * ep_disable: endpoint is no longer usable
2920 *
2921 * Check usb_ep_disable() at "usb_gadget.h" for details
2922 */
2923static int ep_disable(struct usb_ep *ep)
2924{
2925 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2926 int direction, retval = 0;
2927 unsigned long flags;
2928
2929 trace("%p", ep);
2930
2931 if (ep == NULL)
2932 return -EINVAL;
2933 else if (mEp->desc == NULL)
2934 return -EBUSY;
2935
2936 spin_lock_irqsave(mEp->lock, flags);
2937
2938 /* only internal SW should disable ctrl endpts */
2939
2940 direction = mEp->dir;
2941 do {
2942 dbg_event(_usb_addr(mEp), "DISABLE", 0);
2943
2944 retval |= _ep_nuke(mEp);
2945 retval |= hw_ep_disable(mEp->num, mEp->dir);
2946
2947 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
2948 mEp->dir = (mEp->dir == TX) ? RX : TX;
2949
2950 } while (mEp->dir != direction);
2951
2952 if (mEp->last_zptr) {
2953 dma_pool_free(mEp->td_pool, mEp->last_zptr,
2954 mEp->last_zdma);
2955 mEp->last_zptr = NULL;
2956 }
2957
2958 mEp->desc = NULL;
2959 mEp->ep.desc = NULL;
2960 mEp->ep.maxpacket = USHRT_MAX;
2961
2962 spin_unlock_irqrestore(mEp->lock, flags);
2963 return retval;
2964}
2965
2966/**
2967 * ep_alloc_request: allocate a request object to use with this endpoint
2968 *
2969 * Check usb_ep_alloc_request() at "usb_gadget.h" for details
2970 */
2971static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2972{
2973 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2974 struct ci13xxx_req *mReq = NULL;
2975
2976 trace("%p, %i", ep, gfp_flags);
2977
2978 if (ep == NULL) {
2979 err("EINVAL");
2980 return NULL;
2981 }
2982
2983 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
2984 if (mReq != NULL) {
2985 INIT_LIST_HEAD(&mReq->queue);
2986 mReq->req.dma = DMA_ADDR_INVALID;
2987
2988 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
2989 &mReq->dma);
2990 if (mReq->ptr == NULL) {
2991 kfree(mReq);
2992 mReq = NULL;
2993 }
2994 }
2995
2996 dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
2997
2998 return (mReq == NULL) ? NULL : &mReq->req;
2999}
3000
3001/**
3002 * ep_free_request: frees a request object
3003 *
3004 * Check usb_ep_free_request() at "usb_gadget.h" for details
3005 */
3006static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
3007{
3008 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
3009 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
3010 unsigned long flags;
3011
3012 trace("%p, %p", ep, req);
3013
3014 if (ep == NULL || req == NULL) {
3015 err("EINVAL");
3016 return;
3017 } else if (!list_empty(&mReq->queue)) {
3018 err("EBUSY");
3019 return;
3020 }
3021
3022 spin_lock_irqsave(mEp->lock, flags);
3023
3024 if (mReq->ptr)
3025 dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
3026 kfree(mReq);
3027
3028 dbg_event(_usb_addr(mEp), "FREE", 0);
3029
3030 spin_unlock_irqrestore(mEp->lock, flags);
3031}
3032
3033/**
3034 * ep_queue: queues (submits) an I/O request to an endpoint
3035 *
3036 * Check usb_ep_queue()* at usb_gadget.h" for details
3037 */
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
		    gfp_t __maybe_unused gfp_flags)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	int retval = 0;
	unsigned long flags;
	struct ci13xxx *udc = _udc;

	trace("%p, %p, %X", ep, req, gfp_flags);

	spin_lock_irqsave(mEp->lock, flags);
	if (ep == NULL || req == NULL || mEp->desc == NULL) {
		retval = -EINVAL;
		goto done;
	}

	/* reject traffic while the pullup is off */
	if (!udc->softconnect) {
		retval = -ENODEV;
		goto done;
	}

	/* non-control traffic is only valid once configured */
	if (!udc->configured && mEp->type !=
		USB_ENDPOINT_XFER_CONTROL) {
		trace("usb is not configured"
			"ept #%d, ept name#%s\n",
			mEp->num, mEp->ep.name);
		retval = -ESHUTDOWN;
		goto done;
	}

	if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
		/* ep0: route to the endpoint matching the data direction */
		if (req->length)
			mEp = (_udc->ep0_dir == RX) ?
				&_udc->ep0out : &_udc->ep0in;
		if (!list_empty(&mEp->qh.queue)) {
			_ep_nuke(mEp);
			retval = -EOVERFLOW;
			warn("endpoint ctrl %X nuked", _usb_addr(mEp));
		}
	}

	/* first nuke then test link, e.g. previous status has not sent */
	if (!list_empty(&mReq->queue)) {
		retval = -EBUSY;
		err("request already in queue");
		goto done;
	}
	if (mEp->multi_req) {
		retval = -EAGAIN;
		err("Large request is in progress. come again");
		goto done;
	}

	/* requests above 16K are split and fed to the HW in slices */
	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
		if (!list_empty(&mEp->qh.queue)) {
			retval = -EAGAIN;
			err("Queue is busy. Large req is not allowed");
			goto done;
		}
		if ((mEp->type != USB_ENDPOINT_XFER_BULK) ||
				(mEp->dir != RX)) {
			retval = -EINVAL;
			err("Larger req is supported only for Bulk OUT");
			goto done;
		}
		/* save the original buffer/length; queue the first slice */
		mEp->multi_req = true;
		mReq->multi.len = req->length;
		mReq->multi.buf = req->buf;
		req->length = (4 * CI13XXX_PAGE_SIZE);
	}

	dbg_queue(_usb_addr(mEp), req, retval);

	/* push request */
	mReq->req.status = -EINPROGRESS;
	mReq->req.actual = 0;

	retval = _hardware_enqueue(mEp, mReq);

	/* -EALREADY means the TD was appended to an active chain: fine */
	if (retval == -EALREADY) {
		dbg_event(_usb_addr(mEp), "QUEUE", retval);
		retval = 0;
	}
	if (!retval)
		list_add_tail(&mReq->queue, &mEp->qh.queue);
	else if (mEp->multi_req)
		mEp->multi_req = false;

 done:
	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
3131
3132/**
3133 * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
3134 *
3135 * Check usb_ep_dequeue() at "usb_gadget.h" for details
3136 */
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	struct ci13xxx_ep *mEpTemp = mEp;
	struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
	unsigned long flags;

	trace("%p, %p", ep, req);

	spin_lock_irqsave(mEp->lock, flags);
	/*
	 * Only ep0 IN is exposed to composite. When a req is dequeued
	 * on ep0, check both ep0 IN and ep0 OUT queues.
	 */
	if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
		mEp->desc == NULL || list_empty(&mReq->queue) ||
		(list_empty(&mEp->qh.queue) && ((mEp->type !=
			USB_ENDPOINT_XFER_CONTROL) ||
			list_empty(&_udc->ep0out.qh.queue)))) {
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EINVAL;
	}

	dbg_event(_usb_addr(mEp), "DEQUEUE", 0);

	/* stop the hardware first; ep0 owns both directions */
	if ((mEp->type == USB_ENDPOINT_XFER_CONTROL)) {
		hw_ep_flush(_udc->ep0out.num, RX);
		hw_ep_flush(_udc->ep0in.num, TX);
	} else {
		hw_ep_flush(mEp->num, mEp->dir);
	}

	/* pop request */
	list_del_init(&mReq->queue);
	if (mReq->map) {
		/* undo the mapping created when the request was queued */
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->map = 0;
	}
	req->status = -ECONNRESET;
	if (mEp->multi_req) {
		/* hand a split large request back in its original shape */
		restore_original_req(mReq);
		mEp->multi_req = false;
	}

	if (mReq->req.complete != NULL) {
		/* completion runs unlocked; ep0 completes on the IN side */
		spin_unlock(mEp->lock);
		if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
			mReq->req.length)
			mEpTemp = &_udc->ep0in;
		mReq->req.complete(&mEpTemp->ep, &mReq->req);
		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mReq->req.complete = NULL;
		spin_lock(mEp->lock);
	}

	spin_unlock_irqrestore(mEp->lock, flags);
	return 0;
}
3197
3198static int is_sps_req(struct ci13xxx_req *mReq)
3199{
3200 return (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID &&
3201 mReq->req.udc_priv & MSM_SPS_MODE);
3202}
3203
3204/**
3205 * ep_set_halt: sets the endpoint halt feature
3206 *
3207 * Check usb_ep_set_halt() at "usb_gadget.h" for details
3208 */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
	int direction, retval = 0;
	unsigned long flags;

	trace("%p, %i", ep, value);

	if (ep == NULL || mEp->desc == NULL)
		return -EINVAL;

	spin_lock_irqsave(mEp->lock, flags);

#ifndef STALL_IN
	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
		!list_empty(&mEp->qh.queue) &&
		!is_sps_req(list_entry(mEp->qh.queue.next, struct ci13xxx_req,
			queue))){
		spin_unlock_irqrestore(mEp->lock, flags);
		return -EAGAIN;
	}
#endif

	/* control endpoints stall both directions; loop covers each one */
	direction = mEp->dir;
	do {
		dbg_event(_usb_addr(mEp), "HALT", value);
		retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);

		/* clearing a halt also clears any wedge condition */
		if (!value)
			mEp->wedge = 0;

		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
			mEp->dir = (mEp->dir == TX) ? RX : TX;

	} while (mEp->dir != direction);

	spin_unlock_irqrestore(mEp->lock, flags);
	return retval;
}
3249
3250/**
3251 * ep_set_wedge: sets the halt feature and ignores clear requests
3252 *
3253 * Check usb_ep_set_wedge() at "usb_gadget.h" for details
3254 */
3255static int ep_set_wedge(struct usb_ep *ep)
3256{
3257 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
3258 unsigned long flags;
3259
3260 trace("%p", ep);
3261
3262 if (ep == NULL || mEp->desc == NULL)
3263 return -EINVAL;
3264
3265 spin_lock_irqsave(mEp->lock, flags);
3266
3267 dbg_event(_usb_addr(mEp), "WEDGE", 0);
3268 mEp->wedge = 1;
3269
3270 spin_unlock_irqrestore(mEp->lock, flags);
3271
3272 return usb_ep_set_halt(ep);
3273}
3274
3275/**
3276 * ep_fifo_flush: flushes contents of a fifo
3277 *
3278 * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
3279 */
3280static void ep_fifo_flush(struct usb_ep *ep)
3281{
3282 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
3283 unsigned long flags;
3284
3285 trace("%p", ep);
3286
3287 if (ep == NULL) {
3288 err("%02X: -EINVAL", _usb_addr(mEp));
3289 return;
3290 }
3291
3292 spin_lock_irqsave(mEp->lock, flags);
3293
3294 dbg_event(_usb_addr(mEp), "FFLUSH", 0);
3295 /*
3296 * _ep_nuke() takes care of flushing the endpoint.
3297 * some function drivers expect udc to retire all
3298 * pending requests upon flushing an endpoint. There
3299 * is no harm in doing it.
3300 */
3301 _ep_nuke(mEp);
3302
3303 spin_unlock_irqrestore(mEp->lock, flags);
3304}
3305
3306/**
3307 * Endpoint-specific part of the API to the USB controller hardware
3308 * Check "usb_gadget.h" for details
3309 */
/* Per-endpoint operation table handed to the gadget core via each usb_ep */
static const struct usb_ep_ops usb_ep_ops = {
	.enable	       = ep_enable,
	.disable       = ep_disable,
	.alloc_request = ep_alloc_request,
	.free_request  = ep_free_request,
	.queue	       = ep_queue,
	.dequeue       = ep_dequeue,
	.set_halt      = ep_set_halt,
	.set_wedge     = ep_set_wedge,
	.fifo_flush    = ep_fifo_flush,
};
3321
3322/******************************************************************************
3323 * GADGET block
3324 *****************************************************************************/
static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
{
	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
	unsigned long flags;
	int gadget_ready = 0;

	/* only meaningful when the pullup is gated on a VBUS session */
	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
		return -EOPNOTSUPP;

	/* record the session state; act only if a gadget driver is bound */
	spin_lock_irqsave(udc->lock, flags);
	udc->vbus_active = is_active;
	if (udc->driver)
		gadget_ready = 1;
	spin_unlock_irqrestore(udc->lock, flags);

	if (gadget_ready) {
		if (is_active) {
			/* session started: power up, then connect if the
			 * gadget driver has asked for softconnect */
			pm_runtime_get_sync(&_gadget->dev);
			hw_device_reset(udc);
			if (udc->softconnect)
				hw_device_state(udc->ep0out.qh.dma);
		} else {
			/* session ended: disconnect, notify, power down */
			hw_device_state(0);
			_gadget_stop_activity(&udc->gadget);
			if (udc->udc_driver->notify_event)
				udc->udc_driver->notify_event(udc,
					CI13XXX_CONTROLLER_DISCONNECT_EVENT);
			pm_runtime_put_sync(&_gadget->dev);
		}
	}

	return 0;
}
3358
3359static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
3360{
3361 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
3362
3363 if (udc->transceiver)
3364 return usb_phy_set_power(udc->transceiver, mA);
3365 return -ENOTSUPP;
3366}
3367
3368static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active)
3369{
3370 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
3371 unsigned long flags;
3372
3373 spin_lock_irqsave(udc->lock, flags);
3374 udc->softconnect = is_active;
3375 if (((udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) &&
3376 !udc->vbus_active) || !udc->driver) {
3377 spin_unlock_irqrestore(udc->lock, flags);
3378 return 0;
3379 }
3380 spin_unlock_irqrestore(udc->lock, flags);
3381
3382 if (is_active)
3383 hw_device_state(udc->ep0out.qh.dma);
3384 else
3385 hw_device_state(0);
3386
3387 return 0;
3388}
3389
3390static int ci13xxx_start(struct usb_gadget_driver *driver,
3391 int (*bind)(struct usb_gadget *));
3392static int ci13xxx_stop(struct usb_gadget_driver *driver);
3393
3394/**
3395 * Device operations part of the API to the USB controller hardware,
3396 * which don't involve endpoints (or i/o)
3397 * Check "usb_gadget.h" for details
3398 */
/* Device-level (non-endpoint) operations exported to the gadget core */
static const struct usb_gadget_ops usb_gadget_ops = {
	.vbus_session	= ci13xxx_vbus_session,
	.wakeup		= ci13xxx_wakeup,
	.vbus_draw	= ci13xxx_vbus_draw,
	.pullup		= ci13xxx_pullup,
	.start		= ci13xxx_start,
	.stop		= ci13xxx_stop,
};
3407
3408/**
3409 * ci13xxx_start: register a gadget driver
3410 * @driver: the driver being registered
3411 * @bind: the driver's bind callback
3412 *
3413 * Check ci13xxx_start() at <linux/usb/gadget.h> for details.
3414 * Interrupts are enabled here.
3415 */
3416static int ci13xxx_start(struct usb_gadget_driver *driver,
3417 int (*bind)(struct usb_gadget *))
3418{
3419 struct ci13xxx *udc = _udc;
3420 unsigned long flags;
3421 int i, j;
3422 int retval = -ENOMEM;
3423 bool put = false;
3424
3425 trace("%p", driver);
3426
3427 if (driver == NULL ||
3428 bind == NULL ||
3429 driver->setup == NULL ||
3430 driver->disconnect == NULL)
3431 return -EINVAL;
3432 else if (udc == NULL)
3433 return -ENODEV;
3434 else if (udc->driver != NULL)
3435 return -EBUSY;
3436
3437 /* alloc resources */
3438 udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
3439 sizeof(struct ci13xxx_qh),
3440 64, CI13XXX_PAGE_SIZE);
3441 if (udc->qh_pool == NULL)
3442 return -ENOMEM;
3443
3444 udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
3445 sizeof(struct ci13xxx_td),
3446 64, CI13XXX_PAGE_SIZE);
3447 if (udc->td_pool == NULL) {
3448 dma_pool_destroy(udc->qh_pool);
3449 udc->qh_pool = NULL;
3450 return -ENOMEM;
3451 }
3452
3453 spin_lock_irqsave(udc->lock, flags);
3454
3455 info("hw_ep_max = %d", hw_ep_max);
3456
3457 udc->gadget.dev.driver = NULL;
3458
3459 retval = 0;
3460 for (i = 0; i < hw_ep_max/2; i++) {
3461 for (j = RX; j <= TX; j++) {
3462 int k = i + j * hw_ep_max/2;
3463 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
3464
3465 scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
3466 (j == TX) ? "in" : "out");
3467
3468 mEp->lock = udc->lock;
3469 mEp->device = &udc->gadget.dev;
3470 mEp->td_pool = udc->td_pool;
3471
3472 mEp->ep.name = mEp->name;
3473 mEp->ep.ops = &usb_ep_ops;
3474 mEp->ep.maxpacket =
3475 k ? USHRT_MAX : CTRL_PAYLOAD_MAX;
3476
3477 INIT_LIST_HEAD(&mEp->qh.queue);
3478 spin_unlock_irqrestore(udc->lock, flags);
3479 mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
3480 &mEp->qh.dma);
3481 spin_lock_irqsave(udc->lock, flags);
3482 if (mEp->qh.ptr == NULL)
3483 retval = -ENOMEM;
3484 else
3485 memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
3486
3487 /* skip ep0 out and in endpoints */
3488 if (i == 0)
3489 continue;
3490
3491 list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
3492 }
3493 }
3494 if (retval)
3495 goto done;
3496 spin_unlock_irqrestore(udc->lock, flags);
3497 udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
3498 retval = usb_ep_enable(&udc->ep0out.ep);
3499 if (retval)
3500 return retval;
3501
3502 udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
3503 retval = usb_ep_enable(&udc->ep0in.ep);
3504 if (retval)
3505 return retval;
3506 udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_KERNEL);
3507 if (!udc->status)
3508 return -ENOMEM;
3509 udc->status_buf = kzalloc(2, GFP_KERNEL); /* for GET_STATUS */
3510 if (!udc->status_buf) {
3511 usb_ep_free_request(&udc->ep0in.ep, udc->status);
3512 return -ENOMEM;
3513 }
3514 spin_lock_irqsave(udc->lock, flags);
3515
3516 udc->gadget.ep0 = &udc->ep0in.ep;
3517 /* bind gadget */
3518 driver->driver.bus = NULL;
3519 udc->gadget.dev.driver = &driver->driver;
3520 udc->softconnect = 1;
3521
3522 spin_unlock_irqrestore(udc->lock, flags);
3523 pm_runtime_get_sync(&udc->gadget.dev);
3524 retval = bind(&udc->gadget); /* MAY SLEEP */
3525 spin_lock_irqsave(udc->lock, flags);
3526
3527 if (retval) {
3528 udc->gadget.dev.driver = NULL;
3529 goto done;
3530 }
3531
3532 udc->driver = driver;
3533 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
3534 if (udc->vbus_active) {
3535 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
3536 hw_device_reset(udc);
3537 } else {
3538 put = true;
3539 goto done;
3540 }
3541 }
3542
3543 if (!udc->softconnect) {
3544 put = true;
3545 goto done;
3546 }
3547
3548 retval = hw_device_state(udc->ep0out.qh.dma);
3549
3550 done:
3551 spin_unlock_irqrestore(udc->lock, flags);
3552 if (retval || put)
3553 pm_runtime_put_sync(&udc->gadget.dev);
3554
3555 if (udc->udc_driver->notify_event)
3556 udc->udc_driver->notify_event(udc,
3557 CI13XXX_CONTROLLER_UDC_STARTED_EVENT);
3558
3559 return retval;
3560}
3561
3562/**
3563 * ci13xxx_stop: unregister a gadget driver
3564 *
3565 * Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
3566 */
3567static int ci13xxx_stop(struct usb_gadget_driver *driver)
3568{
3569 struct ci13xxx *udc = _udc;
3570 unsigned long i, flags;
3571
3572 trace("%p", driver);
3573
3574 if (driver == NULL ||
3575 driver->unbind == NULL ||
3576 driver->setup == NULL ||
3577 driver->disconnect == NULL ||
3578 driver != udc->driver)
3579 return -EINVAL;
3580
3581 spin_lock_irqsave(udc->lock, flags);
3582
3583 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
3584 udc->vbus_active) {
3585 hw_device_state(0);
3586 spin_unlock_irqrestore(udc->lock, flags);
3587 _gadget_stop_activity(&udc->gadget);
3588 spin_lock_irqsave(udc->lock, flags);
3589 pm_runtime_put(&udc->gadget.dev);
3590 }
3591
3592 /* unbind gadget */
3593 spin_unlock_irqrestore(udc->lock, flags);
3594 driver->unbind(&udc->gadget); /* MAY SLEEP */
3595 spin_lock_irqsave(udc->lock, flags);
3596
3597 usb_ep_free_request(&udc->ep0in.ep, udc->status);
3598 kfree(udc->status_buf);
3599
3600 udc->gadget.dev.driver = NULL;
3601
3602 /* free resources */
3603 for (i = 0; i < hw_ep_max; i++) {
3604 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
3605
3606 if (!list_empty(&mEp->ep.ep_list))
3607 list_del_init(&mEp->ep.ep_list);
3608
3609 if (mEp->qh.ptr != NULL)
3610 dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
3611 }
3612
3613 udc->gadget.ep0 = NULL;
3614 udc->driver = NULL;
3615
3616 spin_unlock_irqrestore(udc->lock, flags);
3617
3618 if (udc->td_pool != NULL) {
3619 dma_pool_destroy(udc->td_pool);
3620 udc->td_pool = NULL;
3621 }
3622 if (udc->qh_pool != NULL) {
3623 dma_pool_destroy(udc->qh_pool);
3624 udc->qh_pool = NULL;
3625 }
3626
3627 return 0;
3628}
3629
3630/******************************************************************************
3631 * BUS block
3632 *****************************************************************************/
3633/**
3634 * udc_irq: global interrupt handler
3635 *
3636 * This function returns IRQ_HANDLED if the IRQ has been handled
3637 * It locks access to registers
3638 */
3639static irqreturn_t udc_irq(void)
3640{
3641 struct ci13xxx *udc = _udc;
3642 irqreturn_t retval;
3643 u32 intr;
3644
3645 trace();
3646
3647 if (udc == NULL) {
3648 err("ENODEV");
3649 return IRQ_HANDLED;
3650 }
3651
3652 spin_lock(udc->lock);
3653
3654 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
3655 if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
3656 USBMODE_CM_DEVICE) {
3657 spin_unlock(udc->lock);
3658 return IRQ_NONE;
3659 }
3660 }
3661 intr = hw_test_and_clear_intr_active();
3662 if (intr) {
3663 isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
3664 isr_statistics.hndl.idx &= ISR_MASK;
3665 isr_statistics.hndl.cnt++;
3666
3667 /* order defines priority - do NOT change it */
3668 if (USBi_URI & intr) {
3669 isr_statistics.uri++;
3670 isr_reset_handler(udc);
3671 }
3672 if (USBi_PCI & intr) {
3673 isr_statistics.pci++;
3674 isr_resume_handler(udc);
3675 }
3676 if (USBi_UEI & intr)
3677 isr_statistics.uei++;
3678 if (USBi_UI & intr) {
3679 isr_statistics.ui++;
3680 isr_tr_complete_handler(udc);
3681 }
3682 if (USBi_SLI & intr) {
3683 isr_suspend_handler(udc);
3684 isr_statistics.sli++;
3685 }
3686 retval = IRQ_HANDLED;
3687 } else {
3688 isr_statistics.none++;
3689 retval = IRQ_NONE;
3690 }
3691 spin_unlock(udc->lock);
3692
3693 return retval;
3694}
3695
3696/**
3697 * udc_release: driver release function
3698 * @dev: device
3699 *
3700 * Currently does nothing
3701 */
3702static void udc_release(struct device *dev)
3703{
3704 trace("%p", dev);
3705
3706 if (dev == NULL)
3707 err("EINVAL");
3708}
3709
3710/**
3711 * udc_probe: parent probe must call this to initialize UDC
3712 * @dev: parent device
3713 * @regs: registers base address
3714 * @name: driver name
3715 *
3716 * This function returns an error code
3717 * No interrupts active, the IRQ has not been requested yet
3718 * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
3719 */
3720static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
3721 void __iomem *regs)
3722{
3723 struct ci13xxx *udc;
3724 struct ci13xxx_platform_data *pdata;
3725 int retval = 0, i;
3726
3727 trace("%p, %p, %p", dev, regs, driver->name);
3728
3729 if (dev == NULL || regs == NULL || driver == NULL ||
3730 driver->name == NULL)
3731 return -EINVAL;
3732
3733 udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
3734 if (udc == NULL)
3735 return -ENOMEM;
3736
3737 udc->lock = &udc_lock;
3738 udc->regs = regs;
3739 udc->udc_driver = driver;
3740
3741 udc->gadget.ops = &usb_gadget_ops;
3742 udc->gadget.speed = USB_SPEED_UNKNOWN;
3743 udc->gadget.max_speed = USB_SPEED_HIGH;
3744 if (udc->udc_driver->flags & CI13XXX_IS_OTG)
3745 udc->gadget.is_otg = 1;
3746 else
3747 udc->gadget.is_otg = 0;
3748 udc->gadget.name = driver->name;
3749
3750 INIT_LIST_HEAD(&udc->gadget.ep_list);
3751 udc->gadget.ep0 = NULL;
3752
3753 pdata = dev->platform_data;
3754 if (pdata)
3755 udc->gadget.usb_core_id = pdata->usb_core_id;
3756
3757 dev_set_name(&udc->gadget.dev, "gadget");
3758 udc->gadget.dev.dma_mask = dev->dma_mask;
3759 udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
3760 udc->gadget.dev.parent = dev;
3761 udc->gadget.dev.release = udc_release;
3762
3763 if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
3764 udc->transceiver = usb_get_transceiver();
3765 if (udc->transceiver == NULL) {
3766 retval = -ENODEV;
3767 goto free_udc;
3768 }
3769 }
3770
3771 INIT_DELAYED_WORK(&udc->rw_work, usb_do_remote_wakeup);
3772
3773 retval = hw_device_init(regs);
3774 if (retval < 0)
3775 goto put_transceiver;
3776
3777 for (i = 0; i < hw_ep_max; i++) {
3778 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
3779 INIT_LIST_HEAD(&mEp->ep.ep_list);
3780 setup_timer(&mEp->prime_timer, ep_prime_timer_func,
3781 (unsigned long) mEp);
3782 }
3783
3784 if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
3785 retval = hw_device_reset(udc);
3786 if (retval)
3787 goto put_transceiver;
3788 }
3789
3790 retval = device_register(&udc->gadget.dev);
3791 if (retval) {
3792 put_device(&udc->gadget.dev);
3793 goto put_transceiver;
3794 }
3795
3796#ifdef CONFIG_USB_GADGET_DEBUG_FILES
3797 retval = dbg_create_files(&udc->gadget.dev);
3798#endif
3799 if (retval)
3800 goto unreg_device;
3801
3802 if (udc->transceiver) {
3803 retval = otg_set_peripheral(udc->transceiver->otg,
3804 &udc->gadget);
3805 if (retval)
3806 goto remove_dbg;
3807 }
3808
3809 retval = usb_add_gadget_udc(dev, &udc->gadget);
3810 if (retval)
3811 goto remove_trans;
3812
3813 pm_runtime_no_callbacks(&udc->gadget.dev);
3814 pm_runtime_enable(&udc->gadget.dev);
3815
3816 if (register_trace_usb_daytona_invalid_access(dump_usb_info, NULL))
3817 pr_err("Registering trace failed\n");
3818
3819 _udc = udc;
3820 return retval;
3821
3822remove_trans:
3823 if (udc->transceiver) {
3824 otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
3825 usb_put_transceiver(udc->transceiver);
3826 }
3827
3828 err("error = %i", retval);
3829remove_dbg:
3830#ifdef CONFIG_USB_GADGET_DEBUG_FILES
3831 dbg_remove_files(&udc->gadget.dev);
3832#endif
3833unreg_device:
3834 device_unregister(&udc->gadget.dev);
3835put_transceiver:
3836 if (udc->transceiver)
3837 usb_put_transceiver(udc->transceiver);
3838free_udc:
3839 kfree(udc);
3840 _udc = NULL;
3841 return retval;
3842}
3843
3844/**
3845 * udc_remove: parent remove must call this to remove UDC
3846 *
3847 * No interrupts active, the IRQ has been released
3848 */
3849static void udc_remove(void)
3850{
3851 struct ci13xxx *udc = _udc;
3852 int retval;
3853
3854 if (udc == NULL) {
3855 err("EINVAL");
3856 return;
3857 }
3858 retval = unregister_trace_usb_daytona_invalid_access(dump_usb_info,
3859 NULL);
3860 if (retval)
3861 pr_err("Unregistering trace failed\n");
3862
3863 usb_del_gadget_udc(&udc->gadget);
3864
3865 if (udc->transceiver) {
3866 otg_set_peripheral(udc->transceiver->otg, &udc->gadget);
3867 usb_put_transceiver(udc->transceiver);
3868 }
3869#ifdef CONFIG_USB_GADGET_DEBUG_FILES
3870 dbg_remove_files(&udc->gadget.dev);
3871#endif
3872 device_unregister(&udc->gadget.dev);
3873
3874 kfree(udc);
3875 _udc = NULL;
3876}