blob: 428d184ceb339390bd0de08f21dc7a9034fd7f18 [file] [log] [blame]
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001/*
2 * ISP1362 HCD (Host Controller Driver) for USB.
3 *
4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
5 *
6 * Derived from the SL811 HCD, rewritten for ISP116x.
7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
8 *
9 * Portions:
10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11 * Copyright (C) 2004 David Brownell
12 */
13
14/*
15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
16 * accesses to the address and data register.
17 * The following timing options exist:
18 *
19 * 1. Configure your memory controller to add such delays if it can (the best)
20 * 2. Implement platform-specific delay function possibly
21 * combined with configuring the memory controller; see
22 * include/linux/usb_isp1362.h for more info.
23 * 3. Use ndelay (easiest, poorest).
24 *
25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26 * platform specific section of isp1362.h to select the appropriate variant.
27 *
28 * Also note that according to the Philips "ISP1362 Errata" document
29 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
30 * is reasserted (even with #CS deasserted) within 132ns after a
31 * write cycle to any controller register. If the hardware doesn't
32 * implement the recommended fix (gating the #WR with #CS) software
33 * must ensure that no further write cycle (not necessarily to the chip!)
34 * is issued by the CPU within this interval.
 *
36 * For PXA25x this can be ensured by using VLIO with the maximum
37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
38 */
39
40#ifdef CONFIG_USB_DEBUG
41# define ISP1362_DEBUG
42#else
43# undef ISP1362_DEBUG
44#endif
45
46/*
47 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49 * requests are carried out in separate frames. This will delay any SETUP
50 * packets until the start of the next frame so that this situation is
51 * unlikely to occur (and makes usbtest happy running with a PXA255 target
52 * device).
53 */
54#undef BUGGY_PXA2XX_UDC_USBTEST
55
56#undef PTD_TRACE
57#undef URB_TRACE
58#undef VERBOSE
59#undef REGISTERS
60
61/* This enables a memory test on the ISP1362 chip memory to make sure the
62 * chip access timing is correct.
63 */
64#undef CHIP_BUFFER_TEST
65
66#include <linux/module.h>
67#include <linux/moduleparam.h>
68#include <linux/kernel.h>
69#include <linux/delay.h>
70#include <linux/ioport.h>
71#include <linux/sched.h>
72#include <linux/slab.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040073#include <linux/errno.h>
74#include <linux/init.h>
75#include <linux/list.h>
76#include <linux/interrupt.h>
77#include <linux/usb.h>
78#include <linux/usb/isp1362.h>
Eric Lescouet27729aa2010-04-24 23:21:52 +020079#include <linux/usb/hcd.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040080#include <linux/platform_device.h>
81#include <linux/pm.h>
82#include <linux/io.h>
Akinobu Mita735e1b92009-12-15 16:48:28 -080083#include <linux/bitmap.h>
Linus Torvalds268bb0c2011-05-20 12:50:29 -070084#include <linux/prefetch.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040085
86#include <asm/irq.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040087#include <asm/byteorder.h>
88#include <asm/unaligned.h>
89
/* Debug verbosity; higher values produce more DBG() output.
 * Runtime-writable via module parameter only in debug builds. */
static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
/* non-debug build: debugfs file is stubbed out */
#define STUB_DEBUG_FILE
#endif
97
Lothar Wassmanna9d43092009-07-16 20:51:21 -040098#include "../core/usb.h"
99#include "isp1362.h"
100
101
#define DRIVER_VERSION	"2005-04-04"
#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char hcd_name[] = "isp1362-hcd";

/* forward declarations; definitions follow later in this file */
static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);
112
113/*-------------------------------------------------------------------------*/
114
115/*
116 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
117 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
118 * completion.
119 * We don't need a 'disable' counterpart, since interrupts will be disabled
120 * only by the interrupt handler.
121 */
122static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
123{
124 if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
125 return;
126 if (mask & ~isp1362_hcd->irqenb)
127 isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
128 isp1362_hcd->irqenb |= mask;
129 if (isp1362_hcd->irq_active)
130 return;
131 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
132}
133
134/*-------------------------------------------------------------------------*/
135
136static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
137 u16 offset)
138{
139 struct isp1362_ep_queue *epq = NULL;
140
141 if (offset < isp1362_hcd->istl_queue[1].buf_start)
142 epq = &isp1362_hcd->istl_queue[0];
143 else if (offset < isp1362_hcd->intl_queue.buf_start)
144 epq = &isp1362_hcd->istl_queue[1];
145 else if (offset < isp1362_hcd->atl_queue.buf_start)
146 epq = &isp1362_hcd->intl_queue;
147 else if (offset < isp1362_hcd->atl_queue.buf_start +
148 isp1362_hcd->atl_queue.buf_size)
149 epq = &isp1362_hcd->atl_queue;
150
151 if (epq)
152 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
153 else
154 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
155
156 return epq;
157}
158
159static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
160{
161 int offset;
162
163 if (index * epq->blk_size > epq->buf_size) {
164 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
165 epq->buf_size / epq->blk_size);
166 return -EINVAL;
167 }
168 offset = epq->buf_start + index * epq->blk_size;
169 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
170
171 return offset;
172}
173
174/*-------------------------------------------------------------------------*/
175
176static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
177 int mps)
178{
179 u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
180
181 xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
182 if (xfer_size < size && xfer_size % mps)
183 xfer_size -= xfer_size % mps;
184
185 return xfer_size;
186}
187
/*
 * Claim a contiguous run of PTD blocks in @epq large enough for @len
 * payload bytes plus the PTD header, on behalf of @ep.
 *
 * Returns the first claimed block index on success, -ENOMEM when the
 * queue has no free blocks at all, or -EOVERFLOW when no sufficiently
 * large contiguous run exists.  On success the blocks are marked busy in
 * epq->buf_map and ep->ptd_offset/ptd_index/num_ptds are updated.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* number of blocks needed for header + payload (rounded up) */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* an endpoint must never hold two PTD allocations at once */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	/* record the allocation on the endpoint and in the queue bitmap */
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
226
/*
 * Give @ep's PTD blocks back to @epq: clear them in the allocation bitmap,
 * set them in the skip map (so the chip stops processing them) and reset
 * the endpoint's PTD bookkeeping fields.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	/* one past the last block owned by this endpoint */
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		    epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* endpoint no longer owns any chip buffer space */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
256
257/*-------------------------------------------------------------------------*/
258
259/*
260 Set up PTD's.
261*/
/*
 * Fill in @ep->ptd (and ep->data/ep->length) for the next transaction of
 * @urb, according to the endpoint's next PID state (IN/OUT/SETUP/ACK).
 * @fno is the frame number used for isochronous PTDs.
 * Only prepares the in-memory PTD image; writing it to the chip is done
 * separately by isp1362_write_ptd().
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still outstanding */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default payload pointer: continue where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			/* ISO: one frame descriptor's worth of data */
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP stage: fixed 8-byte request from urb->setup_packet */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* status stage: zero-length packet in the opposite direction */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	/* pack the hardware PTD fields */
	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
346
347static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
348 struct isp1362_ep_queue *epq)
349{
350 struct ptd *ptd = &ep->ptd;
351 int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
352
Lothar Wassmanna9d43092009-07-16 20:51:21 -0400353 prefetch(ptd);
354 isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
355 if (len)
356 isp1362_write_buffer(isp1362_hcd, ep->data,
357 ep->ptd_offset + PTD_HEADER_SIZE, len);
358
359 dump_ptd(ptd);
360 dump_ptd_out_data(ptd, ep->data);
361}
362
/*
 * Read back @ep's PTD header from chip memory after completion, remove the
 * endpoint from the queue's active list and, for IN transfers, copy the
 * actually-received payload bytes into ep->data.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	/* number of bytes the chip actually transferred */
	act_len = PTD_GET_COUNT(ptd);
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
394
395/*
396 * INT PTDs will stay in the chip until data is available.
397 * This function will remove a PTD from the chip when the URB is dequeued.
398 * Must be called with the spinlock held and IRQs disabled
399 */
400static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
401
402{
403 int index;
404 struct isp1362_ep_queue *epq;
405
406 DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
407 BUG_ON(ep->ptd_offset < 0);
408
409 epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
410 BUG_ON(!epq);
411
412 /* put ep in remove_list for cleanup */
413 WARN_ON(!list_empty(&ep->remove_list));
414 list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
415 /* let SOF interrupt handle the cleanup */
416 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
417
418 index = ep->ptd_index;
419 if (index < 0)
420 /* ISO queues don't have SKIP registers */
421 return;
422
423 DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
424 index, ep->ptd_offset, epq->skip_map, 1 << index);
425
426 /* prevent further processing of PTD (will be effective after next SOF) */
427 epq->skip_map |= 1 << index;
428 if (epq == &isp1362_hcd->atl_queue) {
429 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
430 isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
431 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
432 if (~epq->skip_map == 0)
433 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
434 } else if (epq == &isp1362_hcd->intl_queue) {
435 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
436 isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
437 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
438 if (~epq->skip_map == 0)
439 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
440 }
441}
442
443/*
444 Take done or failed requests out of schedule. Give back
445 processed urbs.
446*/
/*
 * Give a completed (or failed) URB back to the USB core with @status and
 * deschedule the endpoint if it has no more queued URBs.
 * The HCD lock is dropped around usb_hcd_giveback_urb() and reacquired
 * afterwards (see the __releases/__acquires annotations).
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* control endpoints restart with a SETUP stage for the next URB */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
			usb_pipeint(urb->pipe) ? "int" :
			usb_pipebulk(urb->pipe) ? "bulk" :
			"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	/* giveback may re-enter the HCD, so it runs unlocked */
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule: return the bandwidth reserved on this branch */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
497
498/*
499 * Analyze transfer results, handle partial transfers and errors
500*/
/*
 * Analyze the completion status of @ep's PTD after the chip has processed
 * it: update toggles and urb->actual_length, advance the endpoint's PID
 * state machine (DATA -> ACK for control transfers), handle partial
 * transfers and errors, and finish the URB when it is done.
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	/* -EINPROGRESS means "URB not finished yet" throughout this function */
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	if (cc == PTD_NOTACCESSED) {
		/* chip never touched the PTD; treat as unresponsive device */
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			/* save the data underrun error code for later and
			 * proceed with the status stage
			 */
			urb->actual_length += PTD_GET_COUNT(ptd);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* give up after 3 consecutive errors, or at once on STALL/overrun */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		/* OUT with no error must have sent exactly ep->length bytes */
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* done, unless a trailing ZLP is still required */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* SETUP done: go to DATA stage, or straight to STATUS if no data */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		/* STATUS stage complete: the control transfer is finished */
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
654
/*
 * Process the HCD's remove_list (endpoints queued by remove_ptd()):
 * release their chip buffer space, give any still-queued URB back with
 * -ESHUTDOWN and take them off the active and remove lists.
 * Called from the SOF interrupt path after the skip maps have taken effect.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
688
689static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
690{
691 if (count > 0) {
692 if (count < isp1362_hcd->atl_queue.ptd_count)
693 isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
694 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
695 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
696 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
697 } else
698 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
699}
700
/*
 * Activate INTL (interrupt transfer) processing: unmask the INTL done
 * interrupt, mark the INTL buffer active and load the current skip map.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
707
708static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
709{
710 isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
711 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
712 HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
713}
714
715static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
716 struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
717{
718 int index = epq->free_ptd;
719
720 prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
721 index = claim_ptd_buffers(epq, ep, ep->length);
722 if (index == -ENOMEM) {
723 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
724 ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
725 return index;
726 } else if (index == -EOVERFLOW) {
727 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
728 __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
729 epq->buf_map, epq->skip_map);
730 return index;
731 } else
732 BUG_ON(index < 0);
733 list_add_tail(&ep->active, &epq->active);
734 DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
735 ep, ep->num_req, ep->length, &epq->active);
736 DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
737 ep->ptd_offset, ep, ep->num_req);
738 isp1362_write_ptd(isp1362_hcd, ep, epq);
739 __clear_bit(ep->ptd_index, &epq->skip_map);
740
741 return 0;
742}
743
/*
 * Walk the async schedule and submit a PTD for every endpoint that is not
 * already active on the ATL queue.  Submission stops (and is deferred to
 * the next SOF) when the queue runs out of space.  The schedule list is
 * rotated each pass to avoid starving endpoints at its tail.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			/* queue completely full: stop and retry next SOF */
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			/* no contiguous run for this ep; try the others */
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
796
/*
 * Walk the periodic schedule and submit a PTD for every interrupt
 * endpoint that is not already active on the INTL queue; then activate
 * the queue if anything was submitted.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;		/* queue full: stop submitting */
		else if (ret == -EOVERFLOW)
			continue;	/* no room for this ep; try the others */
		ptd_count++;
	}

	if (ptd_count) {
		/* rate-limited statistics output */
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
842
843static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
844{
845 u16 ptd_offset = ep->ptd_offset;
846 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
847
848 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
849 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
850
851 ptd_offset += num_ptds * epq->blk_size;
852 if (ptd_offset < epq->buf_start + epq->buf_size)
853 return ptd_offset;
854 else
855 return -ENOMEM;
856}
857
/*
 * Fill the current ISTL buffer with PTDs for isochronous URBs that are
 * due in the next frame, write them to the chip and activate the buffer.
 * Thanks to the chip's double buffering, the second ISTL buffer is filled
 * for the following frame as well when it is free (the 'goto fill2' loop).
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	/* current frame number from the chip */
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	/* this ISTL buffer is still in use by the chip */
	if (!list_empty(&epq->active))
		return;

	/* ISO PTDs are packed back to back from the start of the buffer */
	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				    __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
				     ep->num_req, epq->name);
				break;
			}
		}
	}
	/* flush the prepared PTDs to the chip */
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
938
/*
 * Post-process every active endpoint of @epq whose bit is set in
 * @done_map (the chip's done bitmap): read back its PTD, release its
 * buffer space and run the completion state machine.  Bits are consumed
 * from the local done_map copy so the walk can stop early when none
 * remain; leftover bits afterwards indicate lost bookkeeping.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	/* flag the queue busy so start_*_transfers() keeps out */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			/* ep completed before its unlink was processed */
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
		     epq->skip_map);
	atomic_dec(&epq->finishing);
}
984
/*
 * Complete all PTDs on the active list of an ISO (ISTL) queue.
 *
 * Unlike finish_transfers() no done_map is consulted: every endpoint on
 * the active list is read back from chip memory and post-processed.
 * Called with isp1362_hcd->lock held (IRQ handler and bus-suspend path).
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	atomic_inc(&epq->finishing);
	/* _safe iteration: postproc_ep() may unlink ep from the active list */
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* NOTE(review): presumably ISTL queues keep blk_size == 0 — confirm
	 * against the queue initialization code */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1008
/*
 * Top-level interrupt handler.
 *
 * All chip interrupts are funnelled through HCuPINT; the handler masks
 * everything (HCuPINTENB = 0), acknowledges and services the enabled
 * sources, then re-enables the (possibly updated) irqenb mask on exit.
 * svc_mask tracks sources that were raised but not serviced, which is
 * reported as an error at the end.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	/* the handler must never nest */
	BUG_ON(isp1362_hcd->irq_active++);

	/* mask all interrupts while servicing */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	/* ack the bits we are going to service */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	svc_mask = irqstat;

	/* SOF: one-shot (re-armed when needed); drives unlinking and
	 * restarting of the async (ATL) schedule */
	if (irqstat & HCuPINT_SOF) {
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* ATL PTDs still in flight: re-enable the ATL
				 * irq and let the chip keep processing them */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	/* ISTL0 done: this buffer should be the one istl_flip points at */
	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	/* ISTL1 done: mirror image of the ISTL0 case */
	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	/* finish the buffer that completed, refill, and flip double-buffers;
	 * both ISTL irqs at once would mean we fell behind by a frame */
	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	/* INTL (periodic interrupt transfer) PTD completion */
	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		/* skip finished PTDs before reading them back */
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	/* ATL (control/bulk) PTD completion */
	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	/* OPR: OHCI-style "operational" interrupts (HCINTSTAT) */
	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* cache root-hub status for hub_status_data() */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		/* ack the serviced OHCI-level bits */
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	/* CLKRDY is one-shot: disable it once the clock is up */
	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* restore the (possibly modified) interrupt enable mask */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
1182
1183/*-------------------------------------------------------------------------*/
1184
1185#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
1186static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1187{
1188 int i, branch = -ENOSPC;
1189
1190 /* search for the least loaded schedule branch of that interval
1191 * which has enough bandwidth left unreserved.
1192 */
1193 for (i = 0; i < interval; i++) {
1194 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1195 int j;
1196
1197 for (j = i; j < PERIODIC_SIZE; j += interval) {
1198 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1199 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1200 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1201 break;
1202 }
1203 }
1204 if (j < PERIODIC_SIZE)
1205 continue;
1206 branch = i;
1207 }
1208 }
1209 return branch;
1210}
1211
/*
 * NB! ALL of the code above this point runs with isp1362_hcd->lock
 * held, irqs off.
 */
1215
1216/*-------------------------------------------------------------------------*/
1217
/*
 * hc_driver .urb_enqueue: accept an URB for transfer.
 *
 * Isochronous URBs are rejected up front (-ENOSPC), so the ISO branches
 * further down are currently dead code kept for completeness.
 *
 * A new isp1362_ep is allocated *before* taking the lock (allocation may
 * sleep); periodic endpoints additionally reserve schedule bandwidth via
 * balance().  Returns 0 or a negative errno; on failure the URB is left
 * unlinked.
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
			usb_pipeint(pipe) ? "int" :
			usb_pipebulk(pipe) ? "bulk" :
			"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		/* endpoint already known: reuse it (the kzalloc above was
		 * skipped in this case) */
		ep = hep->hcpriv;
	} else {
		/* first URB for this endpoint: initialize the new ep */
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			/* branch == PERIODIC_SIZE marks "not scheduled yet" */
			ep->branch = PERIODIC_SIZE;
			/* bus load in usec per frame */
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		/* reserve periodic bandwidth: pick the least loaded branch */
		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* dead code while ISO is rejected above */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the matching transfer engine */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1398
/*
 * hc_driver .urb_dequeue: cancel a previously queued URB.
 *
 * If the URB is at the head of its endpoint's queue and has an active
 * PTD, the PTD is flagged for removal and completion is deferred to the
 * IRQ handler (urb is set to NULL to skip the immediate finish below);
 * otherwise the request is finished right away with @status.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			/* completion deferred; the IRQ handler will finish it */
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1450
/*
 * hc_driver .endpoint_disable: release the driver state for @hep.
 *
 * If the endpoint still has an active PTD, it is queued for removal and
 * this function busy-waits (msleep loop, outside the lock) until the
 * IRQ handler empties the active list before freeing the ep.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* only queue for removal once (remove_list empty) */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	/* drop the reference taken in urb_enqueue() */
	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1480
1481static int isp1362_get_frame(struct usb_hcd *hcd)
1482{
1483 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1484 u32 fmnum;
1485 unsigned long flags;
1486
1487 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1488 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1489 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1490
1491 return (int)fmnum;
1492}
1493
1494/*-------------------------------------------------------------------------*/
1495
1496/* Adapted from ohci-hub.c */
/* Adapted from ohci-hub.c */
/*
 * hc_driver .hub_status_data: build the root-hub status-change bitmap
 * in buf[0] (bit 0 = hub, bit i+1 = port i) from the register values
 * cached by the IRQ handler.  Returns nonzero if anything changed.
 */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -ESHUTDOWN;

	/* Report no status change now, if we are scheduled to be
	   called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362_hcd->rhdesca & RH_A_NDP;
	BUG_ON(ports > 2);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* init status */
	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362_hcd->rhport[i];

		/* any change bit set on this port? */
		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
			continue;
		}

		/* NOTE(review): this check is a no-op — it is the last
		 * statement of the loop, so the continue has no effect */
		if (!(status & RH_PS_CCS))
			continue;
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return changed;
}
1537
/*
 * Fill in the root-hub class descriptor for GetHubDescriptor, derived
 * from the cached HCRHDESCA register value (port count in the low bits,
 * characteristics in bits 8..12, POTPGT in bits 24..31).
 */
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 reg = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = 0x29;
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = reg & 0x3;
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
	/* ports removable, and legacy PortPwrCtrlMask */
	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
	desc->u.hs.DeviceRemovable[1] = ~0;

	DBG(3, "%s: exit\n", __func__);
}
1559
1560/* Adapted from ohci-hub.c */
/* Adapted from ohci-hub.c */
/*
 * hc_driver .hub_control: handle root-hub class control requests by
 * mapping them onto the chip's OHCI-style root-hub registers.  Unknown
 * requests/features return -EPIPE ("protocol stall").
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through */
		case C_HUB_LOCAL_POWER:
			DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			/* accepted but nothing to do */
			DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* no hub-level status bits reported */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* wIndex is 1-based in the request */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature onto the write-to-clear HCRHPORTx bit */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;

			break;
		case USB_PORT_FEAT_C_CONNECTION:
			DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		/* refresh the cached port status */
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);

			/* retry resets for up to USB_RESET_WIDTH ms while the
			 * device remains connected */
			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any current reset finishes */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					udelay(500);
				}
				/* device gone: stop resetting */
				if (!(tmp & RH_PS_CCS))
					break;
				/* Reset lasts 10ms (claims datasheet) */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));

				/* drop the lock while sleeping */
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}

	return retval;
}
1728
1729#ifdef CONFIG_PM
/*
 * hc_driver .bus_suspend: put the root hub into the OHCI SUSPEND state.
 *
 * Any schedules still active are stopped first (skip all PTDs, wait up
 * to ~2ms for a start-of-frame, then drain the done queues) before
 * HCFS is switched to SUSPEND.  Returns 0 on success or -EBUSY if the
 * controller is in RESET/RESUME or refuses to enter SUSPEND.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	/* honor the minimum gap between root-hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		/* suspending in the middle of a resume: force RESET */
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* FALL THROUGH */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warning("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warning("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip every PTD, disable buffers and interrupts, and arm
		 * the SOF status bit so we can see frame processing stop */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* drain whatever completed while we were stopping */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
	    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* ack all pending OHCI-level interrupts */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* verify the controller really entered SUSPEND */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		       isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
1827
/*
 * hc_driver .bus_resume: bring the root hub back to the OHCI
 * OPERATIONAL state.
 *
 * Depending on the current HCFS state this starts a resume, recognizes
 * a remote wakeup already in progress, treats OPERATIONAL as a no-op,
 * or — if the controller fell back to RESET (power lost) — restarts the
 * HC entirely via isp1362_hc_stop()/isp1362_hc_start().
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;

	/* honor the minimum gap between root-hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warning("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			/* already running: nothing to do */
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default: /* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		/* power was lost: full controller restart */
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1907#else
1908#define isp1362_bus_suspend NULL
1909#define isp1362_bus_resume NULL
1910#endif
1911
1912/*-------------------------------------------------------------------------*/
1913
1914#ifdef STUB_DEBUG_FILE
1915
/* STUB_DEBUG_FILE build: the procfs debug interface is compiled out; no-op. */
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
/* STUB_DEBUG_FILE build: nothing was created, so nothing to remove. */
static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
1922
1923#else
1924
1925#include <linux/proc_fs.h>
1926#include <linux/seq_file.h>
1927
1928static void dump_irq(struct seq_file *s, char *label, u16 mask)
1929{
1930 seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1931 mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1932 mask & HCuPINT_SUSP ? " susp" : "",
1933 mask & HCuPINT_OPR ? " opr" : "",
1934 mask & HCuPINT_EOT ? " eot" : "",
1935 mask & HCuPINT_ATL ? " atl" : "",
1936 mask & HCuPINT_SOF ? " sof" : "");
1937}
1938
1939static void dump_int(struct seq_file *s, char *label, u32 mask)
1940{
1941 seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1942 mask & OHCI_INTR_MIE ? " MIE" : "",
1943 mask & OHCI_INTR_RHSC ? " rhsc" : "",
1944 mask & OHCI_INTR_FNO ? " fno" : "",
1945 mask & OHCI_INTR_UE ? " ue" : "",
1946 mask & OHCI_INTR_RD ? " rd" : "",
1947 mask & OHCI_INTR_SF ? " sof" : "",
1948 mask & OHCI_INTR_SO ? " so" : "");
1949}
1950
1951static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1952{
1953 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1954 mask & OHCI_CTRL_RWC ? " rwc" : "",
1955 mask & OHCI_CTRL_RWE ? " rwe" : "",
1956 ({
1957 char *hcfs;
1958 switch (mask & OHCI_CTRL_HCFS) {
1959 case OHCI_USB_OPER:
1960 hcfs = " oper";
1961 break;
1962 case OHCI_USB_RESET:
1963 hcfs = " reset";
1964 break;
1965 case OHCI_USB_RESUME:
1966 hcfs = " resume";
1967 break;
1968 case OHCI_USB_SUSPEND:
1969 hcfs = " suspend";
1970 break;
1971 default:
1972 hcfs = " ?";
1973 }
1974 hcfs;
1975 }));
1976}
1977
/*
 * Dump all readable ISP1362 registers to the seq_file: first the
 * 32-bit OHCI-compatible register set, then the 16-bit chip-specific
 * configuration/buffer registers, then the ISTL/INTL/ATL queue
 * registers.  Caller holds isp1362_hcd->lock (register accesses are
 * not atomic on this chip).
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* OHCI-compatible 32-bit registers */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* chip-specific 16-bit configuration registers */
	seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* NOTE(review): dead code; `ISP1362_REG_NO(HCDIRDATA)` looks like it is
	 * missing the ISP1362_REG_ prefix used everywhere else -- confirm before
	 * enabling.  Reading HCDIRDATA would also disturb a direct-access cycle.
	 */
	seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* INTL (interrupt transfer) queue registers */
	seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* ATL (async transfer) queue registers */
	seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2069
/*
 * seq_file show callback for /proc/driver/isp1362: dumps driver
 * statistics, all chip registers, and the async/periodic/isochronous
 * endpoint schedules with their queued URBs.  Always returns 0.
 */
static int proc_isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);

	/* interrupt enable/status, both chip-level and OHCI-level */
	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-source interrupt counters (only non-zero ones) */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	/* async (control/bulk) schedule with queued URBs */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		/* NB: the inner 's' below shadows the seq_file pointer;
		 * it is only live inside the statement expression */
		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   char *s;
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   };
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* periodic (interrupt) schedule */
	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	/* isochronous schedule */
	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
2173
2174static int proc_isp1362_open(struct inode *inode, struct file *file)
2175{
Al Virod9dda782013-03-31 18:16:14 -04002176 return single_open(file, proc_isp1362_show, PDE_DATA(inode));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002177}
2178
/* file_operations for /proc/driver/isp1362: read-only seq_file snapshot
 * using the single_open()/single_release() pattern. */
static const struct file_operations proc_ops = {
	.open = proc_isp1362_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2185
/* Fixed procfs path; we expect just one isp1362_hcd per system, so a
 * single shared name is sufficient. */
static const char proc_filename[] = "driver/isp1362";
2188
2189static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2190{
2191 struct proc_dir_entry *pde;
2192
Al Viro96e7d912013-03-30 13:15:27 -04002193 pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, isp1362_hcd);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002194 if (pde == NULL) {
2195 pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2196 return;
2197 }
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002198 isp1362_hcd->pde = pde;
2199}
2200
2201static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2202{
2203 if (isp1362_hcd->pde)
Randy Dunlap326b4812010-04-19 08:53:50 -07002204 remove_proc_entry(proc_filename, NULL);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002205}
2206
2207#endif
2208
2209/*-------------------------------------------------------------------------*/
2210
Jiri Slaby1c815572010-06-21 17:02:51 +02002211static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002212{
2213 int tmp = 20;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002214
2215 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2216 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2217 while (--tmp) {
2218 mdelay(1);
2219 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2220 break;
2221 }
2222 if (!tmp)
2223 pr_err("Software reset timeout\n");
Jiri Slaby1c815572010-06-21 17:02:51 +02002224}
2225
/*
 * Locked wrapper around __isp1362_sw_reset(): takes the HCD spinlock
 * with IRQs disabled for the duration of the reset sequence.  Callers
 * that already hold the lock (e.g. isp1362_hc_stop) use the __ variant
 * directly.
 */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	__isp1362_sw_reset(isp1362_hcd);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2234
2235static int isp1362_mem_config(struct usb_hcd *hcd)
2236{
2237 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2238 unsigned long flags;
2239 u32 total;
2240 u16 istl_size = ISP1362_ISTL_BUFSIZE;
2241 u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2242 u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2243 u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2244 u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2245 u16 atl_size;
2246 int i;
2247
2248 WARN_ON(istl_size & 3);
2249 WARN_ON(atl_blksize & 3);
2250 WARN_ON(intl_blksize & 3);
2251 WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2252 WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2253
2254 BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2255 if (atl_buffers > 32)
2256 atl_buffers = 32;
2257 atl_size = atl_buffers * atl_blksize;
2258 total = atl_size + intl_size + istl_size;
2259 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2260 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2261 istl_size / 2, istl_size, 0, istl_size / 2);
Lothar Wassmann96b85172010-01-15 08:04:55 -05002262 dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002263 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2264 intl_size, istl_size);
Lothar Wassmann96b85172010-01-15 08:04:55 -05002265 dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002266 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2267 atl_size, istl_size + intl_size);
2268 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
2269 ISP1362_BUF_SIZE - total);
2270
2271 if (total > ISP1362_BUF_SIZE) {
2272 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2273 __func__, total, ISP1362_BUF_SIZE);
2274 return -ENOMEM;
2275 }
2276
2277 total = istl_size + intl_size + atl_size;
2278 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2279
2280 for (i = 0; i < 2; i++) {
2281 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
2282 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2283 isp1362_hcd->istl_queue[i].blk_size = 4;
2284 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2285 snprintf(isp1362_hcd->istl_queue[i].name,
2286 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2287 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2288 isp1362_hcd->istl_queue[i].name,
2289 isp1362_hcd->istl_queue[i].buf_start,
2290 isp1362_hcd->istl_queue[i].buf_size);
2291 }
2292 isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2293
2294 isp1362_hcd->intl_queue.buf_start = istl_size;
2295 isp1362_hcd->intl_queue.buf_size = intl_size;
2296 isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2297 isp1362_hcd->intl_queue.blk_size = intl_blksize;
2298 isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2299 isp1362_hcd->intl_queue.skip_map = ~0;
2300 INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2301
2302 isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2303 isp1362_hcd->intl_queue.buf_size);
2304 isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2305 isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2306 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2307 isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2308 1 << (ISP1362_INTL_BUFFERS - 1));
2309
2310 isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2311 isp1362_hcd->atl_queue.buf_size = atl_size;
2312 isp1362_hcd->atl_queue.buf_count = atl_buffers;
2313 isp1362_hcd->atl_queue.blk_size = atl_blksize;
2314 isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2315 isp1362_hcd->atl_queue.skip_map = ~0;
2316 INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2317
2318 isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2319 isp1362_hcd->atl_queue.buf_size);
2320 isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2321 isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2322 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2323 isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2324 1 << (atl_buffers - 1));
2325
2326 snprintf(isp1362_hcd->atl_queue.name,
2327 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2328 snprintf(isp1362_hcd->intl_queue.name,
2329 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2330 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2331 isp1362_hcd->intl_queue.name,
2332 isp1362_hcd->intl_queue.buf_start,
2333 ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2334 isp1362_hcd->intl_queue.buf_size);
2335 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2336 isp1362_hcd->atl_queue.name,
2337 isp1362_hcd->atl_queue.buf_start,
2338 atl_buffers, isp1362_hcd->atl_queue.blk_size,
2339 isp1362_hcd->atl_queue.buf_size);
2340
2341 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2342
2343 return 0;
2344}
2345
/*
 * hc_driver->reset: take the controller through a hardware reset (via
 * the board-specific reset/clock hooks when the platform provides
 * them, otherwise via the chip's software reset command) and wait for
 * the chip clock to come up.
 *
 * Returns 0 on success, -ENODEV if the CLKRDY bit does not assert
 * within ~100ms.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;	/* ms to wait for CLKRDY */
	unsigned long flags;
	int clkrdy = 0;

	pr_debug("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* assert reset, enable the clock while still in reset,
		 * then release reset */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* ack the CLKRDY interrupt bit (presumably write-one-to-clear,
	 * matching how HCuPINT is written elsewhere -- confirm vs datasheet) */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
2385
/*
 * hc_driver->stop: quiesce the controller -- stop the root-hub timer,
 * mask all chip interrupts, power down the root-hub ports, put the
 * chip back into reset, and finally gate its clock off if the board
 * provides a clock hook.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_debug("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all chip-level interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		/* lock already held, so use the unlocked reset variant */
		__isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2417
2418#ifdef CHIP_BUFFER_TEST
/*
 * Optional buffer-memory self test (compiled in with CHIP_BUFFER_TEST):
 * writes known patterns into the chip's internal buffer RAM through the
 * data port and reads them back at varying lengths and byte offsets to
 * verify the bus interface timing.  Returns 0 on success or -ENODEV on
 * any mismatch (a kmalloc failure is silently treated as success).
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	/* first half: reference pattern; second half: readback scratch */
	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (0..7 bytes) at all four byte alignments */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				/* NOTE(review): compares from the buffer base even
				 * though the transfer used +offset; looks off for
				 * offset > 0 -- confirm intended behavior */
				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					       __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* one full-size transfer of the whole buffer RAM */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header-sized transfers at 256 different word offsets */
		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			/* clear the RAM and verify both halves read back equal */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the readback once before declaring failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					       __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
					   __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
2509#endif
2510
/*
 * hc_driver->start: verify the chip ID, program the hardware
 * configuration from the board's platform data, partition the buffer
 * memory, configure the root hub, enable interrupts and switch the
 * controller to the USB OPERATIONAL state with global port power on.
 *
 * Returns 0 on success, -ENODEV on bad chip ID or failed buffer self
 * test, or the error from isp1362_mem_config().
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf: build HCHWCFG from the board's platform data flags */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	/* no DMA */
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf: power switching mode and power-on-to-good time.
	 * POTPGT defaults to 25 (i.e. 50ms in 2ms units) if the board
	 * does not specify one. */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* program the SOF frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2614
2615/*-------------------------------------------------------------------------*/
2616
/* hc_driver method table hooking the ISP1362 into the USB core
 * (full/low speed only, PIO register access -- HCD_USB11 | HCD_MEMORY). */
static struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	.irq = isp1362_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};
2640
2641/*-------------------------------------------------------------------------*/
2642
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05002643static int isp1362_remove(struct platform_device *pdev)
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002644{
2645 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2646 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2647 struct resource *res;
2648
2649 remove_debug_file(isp1362_hcd);
2650 DBG(0, "%s: Removing HCD\n", __func__);
2651 usb_remove_hcd(hcd);
2652
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002653 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2654 isp1362_hcd->data_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002655 iounmap(isp1362_hcd->data_reg);
2656
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002657 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2658 isp1362_hcd->addr_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002659 iounmap(isp1362_hcd->addr_reg);
2660
2661 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2662 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2663 if (res)
Axel Lin4e5c3532010-10-15 13:27:57 +08002664 release_mem_region(res->start, resource_size(res));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002665
2666 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2667 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2668 if (res)
Axel Lin4e5c3532010-10-15 13:27:57 +08002669 release_mem_region(res->start, resource_size(res));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002670
2671 DBG(0, "%s: put_hcd\n", __func__);
2672 usb_put_hcd(hcd);
2673 DBG(0, "%s: Done\n", __func__);
2674
2675 return 0;
2676}
2677
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002678static int isp1362_probe(struct platform_device *pdev)
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002679{
2680 struct usb_hcd *hcd;
2681 struct isp1362_hcd *isp1362_hcd;
2682 struct resource *addr, *data;
2683 void __iomem *addr_reg;
2684 void __iomem *data_reg;
2685 int irq;
2686 int retval = 0;
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002687 struct resource *irq_res;
2688 unsigned int irq_flags = 0;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002689
Tobias Klauseraefd4922012-02-17 16:30:04 +01002690 if (usb_disabled())
2691 return -ENODEV;
2692
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002693 /* basic sanity checks first. board-specific init logic should
2694 * have initialized this the three resources and probably board
2695 * specific platform_data. we don't probe for IRQs, and do only
2696 * minimal sanity checking.
2697 */
2698 if (pdev->num_resources < 3) {
2699 retval = -ENODEV;
2700 goto err1;
2701 }
2702
2703 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2704 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002705 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2706 if (!addr || !data || !irq_res) {
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002707 retval = -ENODEV;
2708 goto err1;
2709 }
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002710 irq = irq_res->start;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002711
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002712 if (pdev->dev.dma_mask) {
2713 DBG(1, "won't do DMA");
2714 retval = -ENODEV;
2715 goto err1;
2716 }
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002717
Axel Lin4e5c3532010-10-15 13:27:57 +08002718 if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002719 retval = -EBUSY;
2720 goto err1;
2721 }
Axel Lin4e5c3532010-10-15 13:27:57 +08002722 addr_reg = ioremap(addr->start, resource_size(addr));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002723 if (addr_reg == NULL) {
2724 retval = -ENOMEM;
2725 goto err2;
2726 }
2727
Axel Lin4e5c3532010-10-15 13:27:57 +08002728 if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002729 retval = -EBUSY;
2730 goto err3;
2731 }
Axel Lin4e5c3532010-10-15 13:27:57 +08002732 data_reg = ioremap(data->start, resource_size(data));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002733 if (data_reg == NULL) {
2734 retval = -ENOMEM;
2735 goto err4;
2736 }
2737
2738 /* allocate and initialize hcd */
2739 hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2740 if (!hcd) {
2741 retval = -ENOMEM;
2742 goto err5;
2743 }
2744 hcd->rsrc_start = data->start;
2745 isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2746 isp1362_hcd->data_reg = data_reg;
2747 isp1362_hcd->addr_reg = addr_reg;
2748
2749 isp1362_hcd->next_statechange = jiffies;
2750 spin_lock_init(&isp1362_hcd->lock);
2751 INIT_LIST_HEAD(&isp1362_hcd->async);
2752 INIT_LIST_HEAD(&isp1362_hcd->periodic);
2753 INIT_LIST_HEAD(&isp1362_hcd->isoc);
2754 INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2755 isp1362_hcd->board = pdev->dev.platform_data;
2756#if USE_PLATFORM_DELAY
2757 if (!isp1362_hcd->board->delay) {
2758 dev_err(hcd->self.controller, "No platform delay function given\n");
2759 retval = -ENODEV;
2760 goto err6;
2761 }
2762#endif
2763
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002764 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2765 irq_flags |= IRQF_TRIGGER_RISING;
2766 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2767 irq_flags |= IRQF_TRIGGER_FALLING;
2768 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2769 irq_flags |= IRQF_TRIGGER_HIGH;
2770 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2771 irq_flags |= IRQF_TRIGGER_LOW;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002772
Yong Zhangb5dd18d2011-09-07 16:10:52 +08002773 retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002774 if (retval != 0)
2775 goto err6;
2776 pr_info("%s, irq %d\n", hcd->product_desc, irq);
2777
2778 create_debug_file(isp1362_hcd);
2779
2780 return 0;
2781
2782 err6:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002783 DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002784 usb_put_hcd(hcd);
2785 err5:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002786 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002787 iounmap(data_reg);
2788 err4:
2789 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
Axel Lin4e5c3532010-10-15 13:27:57 +08002790 release_mem_region(data->start, resource_size(data));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002791 err3:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002792 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002793 iounmap(addr_reg);
2794 err2:
2795 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
Axel Lin4e5c3532010-10-15 13:27:57 +08002796 release_mem_region(addr->start, resource_size(addr));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002797 err1:
2798 pr_err("%s: init error, %d\n", __func__, retval);
2799
2800 return retval;
2801}
2802
2803#ifdef CONFIG_PM
2804static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2805{
2806 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2807 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2808 unsigned long flags;
2809 int retval = 0;
2810
2811 DBG(0, "%s: Suspending device\n", __func__);
2812
2813 if (state.event == PM_EVENT_FREEZE) {
2814 DBG(0, "%s: Suspending root hub\n", __func__);
2815 retval = isp1362_bus_suspend(hcd);
2816 } else {
2817 DBG(0, "%s: Suspending RH ports\n", __func__);
2818 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2819 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2820 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2821 }
2822 if (retval == 0)
2823 pdev->dev.power.power_state = state;
2824 return retval;
2825}
2826
2827static int isp1362_resume(struct platform_device *pdev)
2828{
2829 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2830 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2831 unsigned long flags;
2832
2833 DBG(0, "%s: Resuming\n", __func__);
2834
2835 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2836 DBG(0, "%s: Resume RH ports\n", __func__);
2837 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2838 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2839 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2840 return 0;
2841 }
2842
2843 pdev->dev.power.power_state = PMSG_ON;
2844
2845 return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2846}
2847#else
2848#define isp1362_suspend NULL
2849#define isp1362_resume NULL
2850#endif
2851
/*
 * Platform driver glue. Uses the legacy suspend/resume callbacks
 * (both compiled out to NULL when CONFIG_PM is not set).
 */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = isp1362_remove,

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
2863
Tobias Klauseraefd4922012-02-17 16:30:04 +01002864module_platform_driver(isp1362_driver);