/*
 * hcd.h - DesignWare HS OTG Controller host-mode declarations
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __DWC2_HCD_H__
#define __DWC2_HCD_H__

/*
 * This file contains the structures, constants, and interfaces for the
 * Host Controller Driver (HCD)
 *
 * The Host Controller Driver (HCD) is responsible for translating requests
 * from the USB Driver into the appropriate actions on the DWC_otg controller.
 * It isolates the USBD from the specifics of the controller by providing an
 * API to the USBD.
 */

struct dwc2_qh;

/**
 * struct dwc2_host_chan - Software host channel descriptor
 *
 * @hc_num:             Host channel number, used for register address lookup
 * @dev_addr:           Address of the device
 * @ep_num:             Endpoint of the device
 * @ep_is_in:           Endpoint direction
 * @speed:              Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL: 0
 *                       - USB_ENDPOINT_XFER_ISOC: 1
 *                       - USB_ENDPOINT_XFER_BULK: 2
 *                       - USB_ENDPOINT_XFER_INTR: 3
 * @max_packet:         Max packet size in bytes
 * @data_pid_start:     PID for initial transaction.
 *                       0: DATA0
 *                       1: DATA2
 *                       2: DATA1
 *                       3: MDATA (non-Control EP),
 *                          SETUP (Control EP)
 * @multi_count:        Number of additional periodic transactions per
 *                      (micro)frame
 * @xfer_buf:           Pointer to current transfer buffer position
 * @xfer_dma:           DMA address of xfer_buf
 * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
 *                      DWORD aligned
 * @xfer_len:           Total number of bytes to transfer
 * @xfer_count:         Number of bytes transferred so far
 * @start_pkt_count:    Packet count at start of transfer
 * @xfer_started:       True if the transfer has been started
 * @do_ping:            True if a PING request should be issued on this channel
 * @error_state:        True if the error count for this transaction is non-zero
 * @halt_on_queue:      True if this channel should be halted the next time a
 *                      request is queued for the channel. This is necessary in
 *                      slave mode if no request queue space is available when
 *                      an attempt is made to halt the channel.
 * @halt_pending:       True if the host channel has been halted, but the core
 *                      is not finished flushing queued requests
 * @do_split:           Enable split for the channel
 * @complete_split:     Enable complete split
 * @hub_addr:           Address of high speed hub for the split
 * @hub_port:           Port of the low/full speed device for the split
 * @xact_pos:           Split transaction position. One of the following values:
 *                       - DWC2_HCSPLT_XACTPOS_MID
 *                       - DWC2_HCSPLT_XACTPOS_BEGIN
 *                       - DWC2_HCSPLT_XACTPOS_END
 *                       - DWC2_HCSPLT_XACTPOS_ALL
 * @requests:           Number of requests issued for this channel since it was
 *                      assigned to the current transfer (not counting PINGs)
 * @schinfo:            Scheduling micro-frame bitmap
 * @ntd:                Number of transfer descriptors for the transfer
 * @halt_status:        Reason for halting the host channel
 * @hcint:              Contents of the HCINT register when the interrupt came
 * @qh:                 QH for the transfer being processed by this channel
 * @hc_list_entry:      For linking to list of host channels
 * @desc_list_addr:     Current QH's descriptor list DMA address
 *
 * This structure represents the state of a single host channel when acting in
 * host mode. It contains the data items needed to transfer packets to an
 * endpoint via a host channel.
 */
struct dwc2_host_chan {
	u8 hc_num;

	unsigned dev_addr:7;
	unsigned ep_num:4;
	unsigned ep_is_in:1;
	unsigned speed:4;
	unsigned ep_type:2;
	unsigned max_packet:11;
	unsigned data_pid_start:2;
#define DWC2_HC_PID_DATA0	TSIZ_SC_MC_PID_DATA0
#define DWC2_HC_PID_DATA2	TSIZ_SC_MC_PID_DATA2
#define DWC2_HC_PID_DATA1	TSIZ_SC_MC_PID_DATA1
#define DWC2_HC_PID_MDATA	TSIZ_SC_MC_PID_MDATA
#define DWC2_HC_PID_SETUP	TSIZ_SC_MC_PID_SETUP

	unsigned multi_count:2;

	u8 *xfer_buf;
	dma_addr_t xfer_dma;
	dma_addr_t align_buf;
	u32 xfer_len;
	u32 xfer_count;
	u16 start_pkt_count;
	u8 xfer_started;
	u8 do_ping;
	u8 error_state;
	u8 halt_on_queue;
	u8 halt_pending;
	u8 do_split;
	u8 complete_split;
	u8 hub_addr;
	u8 hub_port;
	u8 xact_pos;
#define DWC2_HCSPLT_XACTPOS_MID		HCSPLT_XACTPOS_MID
#define DWC2_HCSPLT_XACTPOS_END		HCSPLT_XACTPOS_END
#define DWC2_HCSPLT_XACTPOS_BEGIN	HCSPLT_XACTPOS_BEGIN
#define DWC2_HCSPLT_XACTPOS_ALL		HCSPLT_XACTPOS_ALL

	u8 requests;
	u8 schinfo;
	u16 ntd;
	enum dwc2_halt_status halt_status;
	u32 hcint;
	struct dwc2_qh *qh;
	struct list_head hc_list_entry;
	dma_addr_t desc_list_addr;
};

struct dwc2_hcd_pipe_info {
	u8 dev_addr;
	u8 ep_num;
	u8 pipe_type;
	u8 pipe_dir;
	u16 mps;
};

struct dwc2_hcd_iso_packet_desc {
	u32 offset;
	u32 length;
	u32 actual_length;
	u32 status;
};

struct dwc2_qtd;

struct dwc2_hcd_urb {
	void *priv;
	struct dwc2_qtd *qtd;
	void *buf;
	dma_addr_t dma;
	void *setup_packet;
	dma_addr_t setup_dma;
	u32 length;
	u32 actual_length;
	u32 status;
	u32 error_count;
	u32 packet_count;
	u32 flags;
	u16 interval;
	struct dwc2_hcd_pipe_info pipe_info;
	struct dwc2_hcd_iso_packet_desc iso_descs[0];
};

/* Phases for control transfers */
enum dwc2_control_phase {
	DWC2_CONTROL_SETUP,
	DWC2_CONTROL_DATA,
	DWC2_CONTROL_STATUS,
};

/* Transaction types */
enum dwc2_transaction_type {
	DWC2_TRANSACTION_NONE,
	DWC2_TRANSACTION_PERIODIC,
	DWC2_TRANSACTION_NON_PERIODIC,
	DWC2_TRANSACTION_ALL,
};

/**
 * struct dwc2_qh - Software queue head structure
 *
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL
 *                       - USB_ENDPOINT_XFER_BULK
 *                       - USB_ENDPOINT_XFER_INT
 *                       - USB_ENDPOINT_XFER_ISOC
 * @ep_is_in:           Endpoint direction
 * @maxp:               Value from wMaxPacketSize field of Endpoint Descriptor
 * @dev_speed:          Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @data_toggle:        Determines the PID of the next data packet for
 *                      non-control transfers. Ignored for control transfers.
 *                      One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @ping_state:         Ping state
 * @do_split:           Full/low speed endpoint on high-speed hub requires split
 * @td_first:           Index of first activated isochronous transfer descriptor
 * @td_last:            Index of last activated isochronous transfer descriptor
 * @usecs:              Bandwidth in microseconds per (micro)frame
 * @interval:           Interval between transfers in (micro)frames
 * @sched_frame:        (Micro)frame to initialize a periodic transfer.
 *                      The transfer executes in the following (micro)frame.
 * @frame_usecs:        Internal variable used by the microframe scheduler
 * @start_split_frame:  (Micro)frame at which last start split was initialized
 * @ntd:                Actual number of transfer descriptors in a list
 * @dw_align_buf:       Used instead of original buffer if its physical address
 *                      is not dword-aligned
 * @dw_align_buf_dma:   DMA address for dw_align_buf
 * @qtd_list:           List of QTDs for this QH
 * @channel:            Host channel currently processing transfers for this QH
 * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
 *                      schedule
 * @desc_list:          List of transfer descriptors
 * @desc_list_dma:      Physical address of desc_list
 * @n_bytes:            Xfer Bytes array. Each element corresponds to a transfer
 *                      descriptor and indicates original XferSize value for the
 *                      descriptor
 * @tt_buffer_dirty:    True if clear_tt_buffer_complete is pending
 *
 * A Queue Head (QH) holds the static characteristics of an endpoint and
 * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
 * be entered in either the non-periodic or periodic schedule.
 */
struct dwc2_qh {
	u8 ep_type;
	u8 ep_is_in;
	u16 maxp;
	u8 dev_speed;
	u8 data_toggle;
	u8 ping_state;
	u8 do_split;
	u8 td_first;
	u8 td_last;
	u16 usecs;
	u16 interval;
	u16 sched_frame;
	u16 frame_usecs[8];
	u16 start_split_frame;
	u16 ntd;
	u8 *dw_align_buf;
	dma_addr_t dw_align_buf_dma;
	struct list_head qtd_list;
	struct dwc2_host_chan *channel;
	struct list_head qh_list_entry;
	struct dwc2_hcd_dma_desc *desc_list;
	dma_addr_t desc_list_dma;
	u32 *n_bytes;
	unsigned tt_buffer_dirty:1;
};

/**
 * struct dwc2_qtd - Software queue transfer descriptor (QTD)
 *
 * @control_phase:      Current phase for control transfers (Setup, Data, or
 *                      Status)
 * @in_process:         Indicates if this QTD is currently processed by HW
 * @data_toggle:        Determines the PID of the next data packet for the
 *                      data phase of control transfers. Ignored for other
 *                      transfer types. One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @complete_split:     Keeps track of the current split type for FS/LS
 *                      endpoints on a HS Hub
 * @isoc_split_pos:     Position of the ISOC split in full/low speed
 * @isoc_frame_index:   Index of the next frame descriptor for an isochronous
 *                      transfer. A frame descriptor describes the buffer
 *                      position and length of the data to be transferred in the
 *                      next scheduled (micro)frame of an isochronous transfer.
 *                      It also holds status for that transaction. The frame
 *                      index starts at 0.
 * @isoc_split_offset:  Position of the ISOC split in the buffer for the
 *                      current frame
 * @ssplit_out_xfer_count: How many bytes transferred during SSPLIT OUT
 * @error_count:        Holds the number of bus errors that have occurred for
 *                      a transaction within this transfer
 * @n_desc:             Number of DMA descriptors for this QTD
 * @isoc_frame_index_last: Last activated frame (packet) index, used in
 *                      descriptor DMA mode only
 * @urb:                URB for this transfer
 * @qh:                 Queue head for this QTD
 * @qtd_list_entry:     For linking to the QH's list of QTDs
 *
 * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
 * interrupt, or isochronous transfer. A single QTD is created for each URB
 * (of one of these types) submitted to the HCD. The transfer associated with
 * a QTD may require one or multiple transactions.
 *
 * A QTD is linked to a Queue Head, which is entered in either the
 * non-periodic or periodic schedule for execution. When a QTD is chosen for
 * execution, some or all of its transactions may be executed. After
 * execution, the state of the QTD is updated. The QTD may be retired if all
 * its transactions are complete or if an error occurred. Otherwise, it
 * remains in the schedule so more transactions can be executed later.
 */
struct dwc2_qtd {
	enum dwc2_control_phase control_phase;
	u8 in_process;
	u8 data_toggle;
	u8 complete_split;
	u8 isoc_split_pos;
	u16 isoc_frame_index;
	u16 isoc_split_offset;
	u32 ssplit_out_xfer_count;
	u8 error_count;
	u8 n_desc;
	u16 isoc_frame_index_last;
	struct dwc2_hcd_urb *urb;
	struct dwc2_qh *qh;
	struct list_head qtd_list_entry;
};

#ifdef DEBUG
struct hc_xfer_info {
	struct dwc2_hsotg *hsotg;
	struct dwc2_host_chan *chan;
};
#endif

/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
{
	return (struct usb_hcd *)hsotg->priv;
}

/*
 * Inline used to disable one channel interrupt. Channel interrupts are
 * disabled when the channel is halted or released by the interrupt handler.
 * There is no need to handle further interrupts of that type until the
 * channel is re-assigned. In fact, subsequent handling may cause crashes
 * because the channel structures are cleaned up when the channel is released.
 */
static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
{
	u32 mask = readl(hsotg->regs + HCINTMSK(chnum));

	mask &= ~intr;
	writel(mask, hsotg->regs + HCINTMSK(chnum));
}
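
/*
 * Usage sketch (illustrative only, not part of the driver API): a channel
 * interrupt handler would typically acknowledge the condition in HCINT and
 * then mask the interrupt it has finished servicing, e.g.:
 *
 *	writel(HCINTMSK_CHHLTD, hsotg->regs + HCINT(chnum));
 *	disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
 *
 * HCINT() and HCINTMSK_CHHLTD are assumed to be the per-channel interrupt
 * register macro and the "channel halted" mask bit from hw.h.
 */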

/*
 * Returns the mode of operation, host or device
 */
static inline int dwc2_is_host_mode(struct dwc2_hsotg *hsotg)
{
	return (readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0;
}

static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg)
{
	return (readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0;
}

/*
 * Reads HPRT0 in preparation for modifying it. It keeps the write-clear (WC)
 * bits 0 so that if they are read as 1, they won't be cleared when written
 * back.
 */
static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
{
	u32 hprt0 = readl(hsotg->regs + HPRT0);

	hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
	return hprt0;
}
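
/*
 * Usage sketch (illustrative only): the read-modify-write pattern this helper
 * is meant for, e.g. driving a port reset without accidentally acknowledging
 * the write-clear status bits:
 *
 *	u32 hprt0 = dwc2_read_hprt0(hsotg);
 *
 *	hprt0 |= HPRT0_RST;
 *	writel(hprt0, hsotg->regs + HPRT0);
 *
 * HPRT0_RST is assumed to be the port-reset bit defined in hw.h.
 */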

static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->ep_num;
}

static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type;
}

static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->mps;
}

static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->dev_addr;
}

static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
}

static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
}

static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
}

static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_dir == USB_DIR_IN;
}

static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
{
	return !dwc2_hcd_is_pipe_in(pipe);
}

extern int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
			 const struct dwc2_core_params *params);
extern void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
				const struct dwc2_core_params *params);
extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);

/* Transaction Execution Functions */
extern enum dwc2_transaction_type dwc2_hcd_select_transactions(
						struct dwc2_hsotg *hsotg);
extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
					enum dwc2_transaction_type tr_type);

/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int sched_csplit);

extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
			    struct dwc2_qh **qh, gfp_t mem_flags);
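
/*
 * Enqueue sketch (illustrative only): the URB enqueue path is expected to
 * allocate a QTD, initialize it from the dwc2_hcd_urb, and link it to a QH,
 * roughly:
 *
 *	struct dwc2_qtd *qtd = kzalloc(sizeof(*qtd), mem_flags);
 *
 *	if (!qtd)
 *		return -ENOMEM;
 *	dwc2_hcd_qtd_init(qtd, dwc2_urb);
 *	retval = dwc2_hcd_qtd_add(hsotg, qtd, &qh, mem_flags);
 *	if (retval < 0)
 *		kfree(qtd);
 *
 * dwc2_urb, qh, retval and mem_flags are assumed to come from the caller's
 * context; whether a NULL *qh triggers QH creation is a detail of the
 * hcd_queue.c implementation and may differ between driver versions.
 */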

/* Unlinks and frees a QTD */
static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
						struct dwc2_qtd *qtd,
						struct dwc2_qh *qh)
{
	list_del(&qtd->qtd_list_entry);
	kfree(qtd);
}

/* Descriptor DMA support functions */
extern void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh);
extern void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan, int chnum,
					enum dwc2_halt_status halt_status);

extern int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				 gfp_t mem_flags);
extern void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);

/* Check if QH is non-periodic */
#define dwc2_qh_is_non_per(_qh_ptr_) \
	((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
	 (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)

#ifdef CONFIG_USB_DWC2_DEBUG_PERIODIC
static inline bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
static inline bool dbg_qh(struct dwc2_qh *qh) { return true; }
static inline bool dbg_urb(struct urb *urb) { return true; }
static inline bool dbg_perio(void) { return true; }
#else /* !CONFIG_USB_DWC2_DEBUG_PERIODIC */
static inline bool dbg_hc(struct dwc2_host_chan *hc)
{
	return hc->ep_type == USB_ENDPOINT_XFER_BULK ||
	       hc->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_qh(struct dwc2_qh *qh)
{
	return qh->ep_type == USB_ENDPOINT_XFER_BULK ||
	       qh->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_urb(struct urb *urb)
{
	return usb_pipetype(urb->pipe) == PIPE_BULK ||
	       usb_pipetype(urb->pipe) == PIPE_CONTROL;
}

static inline bool dbg_perio(void) { return false; }
#endif
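
/*
 * Usage sketch (illustrative only): these predicates gate verbose logging so
 * that periodic (isochronous/interrupt) traffic does not flood the log unless
 * CONFIG_USB_DWC2_DEBUG_PERIODIC is enabled, e.g.:
 *
 *	if (dbg_hc(chan))
 *		dev_vdbg(hsotg->dev, "ch %d: hcint 0x%08x\n",
 *			 chan->hc_num, chan->hcint);
 */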

/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))

/* Packet size for any kind of endpoint descriptor */
#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
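
/*
 * Decoding example (illustrative only): for a high-bandwidth isochronous
 * endpoint with wMaxPacketSize = 0x1400, bits 12:11 (here 0b10) encode the
 * number of additional transactions per microframe and bits 10:0 the packet
 * size, so dwc2_hb_mult(0x1400) == 3 and dwc2_max_packet(0x1400) == 1024,
 * i.e. up to 3 * 1024 bytes per microframe.
 */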

/*
 * Returns true if frame1 is less than or equal to frame2. The comparison is
 * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
 * frame number when the max frame number is reached.
 */
static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
{
	return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Returns true if frame1 is greater than frame2. The comparison is done
 * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
 * number when the max frame number is reached.
 */
static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
{
	return (frame1 != frame2) &&
	       ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Increments frame by the amount specified by inc. The addition is done
 * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
 */
static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
{
	return (frame + inc) & HFNUM_MAX_FRNUM;
}
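
/*
 * Rollover example (illustrative only), assuming HFNUM_MAX_FRNUM == 0x3fff:
 * frame numbers wrap after 0x3fff, so shortly after a wrap
 *
 *	dwc2_frame_num_le(0x3ffe, 0x0001)  evaluates to true
 *	dwc2_frame_num_gt(0x0001, 0x3ffe)  evaluates to true
 *	dwc2_frame_num_inc(0x3ffe, 4)      evaluates to 0x0002
 *
 * i.e. 0x0001 is treated as "later" than 0x3ffe even though it is numerically
 * smaller.
 */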

static inline u16 dwc2_full_frame_num(u16 frame)
{
	return (frame & HFNUM_MAX_FRNUM) >> 3;
}

static inline u16 dwc2_micro_frame_num(u16 frame)
{
	return frame & 0x7;
}
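
/*
 * Example (illustrative only): the low 3 bits of the hardware frame number
 * select the microframe within a full (1 ms) frame, so for frame 0x0123:
 *
 *	dwc2_full_frame_num(0x0123)  == 0x24  (full frame 36)
 *	dwc2_micro_frame_num(0x0123) == 3     (4th microframe of that frame)
 */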

/*
 * Returns the Core Interrupt Status register contents, ANDed with the Core
 * Interrupt Mask register contents
 */
static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
{
	return readl(hsotg->regs + GINTSTS) & readl(hsotg->regs + GINTMSK);
}
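
/*
 * Usage sketch (illustrative only): an interrupt handler would typically use
 * this helper to see only the unmasked, pending causes, e.g.:
 *
 *	u32 gintsts = dwc2_read_core_intr(hsotg);
 *
 *	if (gintsts & GINTSTS_PRTINT)
 *		dwc2_hprt0_intr(hsotg);
 *
 * dwc2_hprt0_intr() is a hypothetical port-interrupt handler named only for
 * illustration; GINTSTS_PRTINT is assumed to be the port interrupt bit from
 * hw.h.
 */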

static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
{
	return dwc2_urb->status;
}

static inline u32 dwc2_hcd_urb_get_actual_length(
		struct dwc2_hcd_urb *dwc2_urb)
{
	return dwc2_urb->actual_length;
}

static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
{
	return dwc2_urb->error_count;
}

static inline void dwc2_hcd_urb_set_iso_desc_params(
		struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
		u32 length)
{
	dwc2_urb->iso_descs[desc_num].offset = offset;
	dwc2_urb->iso_descs[desc_num].length = length;
}
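
/*
 * Usage sketch (illustrative only): when translating a struct urb into a
 * dwc2_hcd_urb, each isochronous frame descriptor would be copied over with
 * something like:
 *
 *	for (i = 0; i < urb->number_of_packets; i++)
 *		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
 *						 urb->iso_frame_desc[i].offset,
 *						 urb->iso_frame_desc[i].length);
 *
 * urb->number_of_packets and urb->iso_frame_desc are the standard Linux USB
 * core fields of struct urb.
 */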

static inline u32 dwc2_hcd_urb_get_iso_desc_status(
		struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
	return dwc2_urb->iso_descs[desc_num].status;
}

static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
		struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
	return dwc2_urb->iso_descs[desc_num].actual_length;
}

static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
						  struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	if (qh && !list_empty(&qh->qh_list_entry))
		return 1;

	return 0;
}

static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
					    struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	if (!qh) {
		WARN_ON(1);
		return 0;
	}

	return qh->usecs;
}

extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan, int chnum,
				      struct dwc2_qtd *qtd);

/* HCD Core API */

/**
 * dwc2_handle_hcd_intr() - Called on every hardware interrupt
 *
 * @hsotg: The DWC2 HCD
 *
 * Returns IRQ_HANDLED if the interrupt is handled
 * Returns IRQ_NONE if the interrupt is not handled
 */
extern irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_stop() - Halts the DWC_otg host mode operation
 *
 * @hsotg: The DWC2 HCD
 */
extern void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);

extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host,
 * and 0 otherwise
 *
 * @hsotg: The DWC2 HCD
 */
extern int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_get_frame_number() - Returns current frame number
 *
 * @hsotg: The DWC2 HCD
 */
extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_dump_state() - Dumps hsotg state
 *
 * @hsotg: The DWC2 HCD
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
extern void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_dump_frrem() - Dumps the average frame remaining at SOF
 *
 * @hsotg: The DWC2 HCD
 *
 * This can be used to determine average interrupt latency. Frame remaining is
 * also shown for start transfer and two additional sample points.
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg);

/* URB interface */

/* Transfer flags */
#define URB_GIVEBACK_ASAP	0x1
#define URB_SEND_ZERO_PACKET	0x2

/* Host driver callbacks */

extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
			       int *hub_addr, int *hub_port);
extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
			       int status);

#ifdef DEBUG
/*
 * Macro to sample the remaining PHY clocks left in the current frame. This
 * may be used during debugging to determine the average time it takes to
 * execute sections of code. There are two possible sample points, "a" and
 * "b", so the _letter_ argument must be one of these values.
 *
 * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
 * example, "cat /sys/devices/lm0/hcd_frrem".
 */
#define dwc2_sample_frrem(_hcd_, _qh_, _letter_)			\
do {									\
	struct hfnum_data _hfnum_;					\
	struct dwc2_qtd *_qtd_;						\
									\
	_qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd,	\
			   qtd_list_entry);				\
	if (usb_pipeint(_qtd_->urb->pipe) &&				\
	    (_qh_)->start_split_frame != 0 && !_qtd_->complete_split) {	\
		_hfnum_.d32 = readl((_hcd_)->regs + HFNUM);		\
		switch (_hfnum_.b.frnum & 0x7) {			\
		case 7:							\
			(_hcd_)->hfnum_7_samples_##_letter_++;		\
			(_hcd_)->hfnum_7_frrem_accum_##_letter_ +=	\
				_hfnum_.b.frrem;			\
			break;						\
		case 0:							\
			(_hcd_)->hfnum_0_samples_##_letter_++;		\
			(_hcd_)->hfnum_0_frrem_accum_##_letter_ +=	\
				_hfnum_.b.frrem;			\
			break;						\
		default:						\
			(_hcd_)->hfnum_other_samples_##_letter_++;	\
			(_hcd_)->hfnum_other_frrem_accum_##_letter_ +=	\
				_hfnum_.b.frrem;			\
			break;						\
		}							\
	}								\
} while (0)
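
/*
 * Usage sketch (illustrative only): a sample point early in the interrupt
 * path might record the frame remaining at entry with
 *
 *	dwc2_sample_frrem(hsotg, chan->qh, a);
 *
 * and a second point later in the same path would use the "b" letter. The
 * hfnum_*_samples/frrem_accum counters are assumed to be the DEBUG-only
 * fields of struct dwc2_hsotg reported by dwc2_hcd_dump_frrem().
 */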
#else
#define dwc2_sample_frrem(_hcd_, _qh_, _letter_) do {} while (0)
#endif

#endif /* __DWC2_HCD_H__ */