blob: fd76256c7e741fef2ca48e9f59546b713705f63d [file] [log] [blame]
/*
 * ISP1362 HCD (Host Controller Driver) for USB.
 *
 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
 *
 * Derived from the SL811 HCD, rewritten for ISP116x.
 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
 *
 * Portions:
 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
 * Copyright (C) 2004 David Brownell
 */
13
/*
 * The ISP1362 chip requires a large delay (300ns and 462ns) between
 * accesses to the address and data register.
 * The following timing options exist:
 *
 * 1. Configure your memory controller to add such delays if it can (the best)
 * 2. Implement platform-specific delay function possibly
 *    combined with configuring the memory controller; see
 *    include/linux/usb_isp1362.h for more info.
 * 3. Use ndelay (easiest, poorest).
 *
 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
 * platform specific section of isp1362.h to select the appropriate variant.
 *
 * Also note that according to the Philips "ISP1362 Errata" document
 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
 * is reasserted (even with #CS deasserted) within 132ns after a
 * write cycle to any controller register. If the hardware doesn't
 * implement the recommended fix (gating the #WR with #CS) software
 * must ensure that no further write cycle (not necessarily to the chip!)
 * is issued by the CPU within this interval.
 *
 * For PXA25x this can be ensured by using VLIO with the maximum
 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
 */
39
Greg Kroah-Hartman641c86c2013-06-28 11:33:01 -070040#undef ISP1362_DEBUG
Lothar Wassmanna9d43092009-07-16 20:51:21 -040041
/*
 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
 * requests are carried out in separate frames. This will delay any SETUP
 * packets until the start of the next frame so that this situation is
 * unlikely to occur (and makes usbtest happy running with a PXA255 target
 * device).
 */
50#undef BUGGY_PXA2XX_UDC_USBTEST
51
52#undef PTD_TRACE
53#undef URB_TRACE
54#undef VERBOSE
55#undef REGISTERS
56
/* This enables a memory test on the ISP1362 chip memory to make sure the
 * chip access timing is correct.
 */
60#undef CHIP_BUFFER_TEST
61
62#include <linux/module.h>
63#include <linux/moduleparam.h>
64#include <linux/kernel.h>
65#include <linux/delay.h>
66#include <linux/ioport.h>
67#include <linux/sched.h>
68#include <linux/slab.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040069#include <linux/errno.h>
70#include <linux/init.h>
71#include <linux/list.h>
72#include <linux/interrupt.h>
73#include <linux/usb.h>
74#include <linux/usb/isp1362.h>
Eric Lescouet27729aa2010-04-24 23:21:52 +020075#include <linux/usb/hcd.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040076#include <linux/platform_device.h>
77#include <linux/pm.h>
78#include <linux/io.h>
Akinobu Mita735e1b92009-12-15 16:48:28 -080079#include <linux/bitmap.h>
Linus Torvalds268bb0c2011-05-20 12:50:29 -070080#include <linux/prefetch.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040081
82#include <asm/irq.h>
Lothar Wassmanna9d43092009-07-16 20:51:21 -040083#include <asm/byteorder.h>
84#include <asm/unaligned.h>
85
86static int dbg_level;
87#ifdef ISP1362_DEBUG
88module_param(dbg_level, int, 0644);
89#else
90module_param(dbg_level, int, 0);
91#define STUB_DEBUG_FILE
92#endif
93
Lothar Wassmanna9d43092009-07-16 20:51:21 -040094#include "../core/usb.h"
95#include "isp1362.h"
96
97
98#define DRIVER_VERSION "2005-04-04"
99#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
100
101MODULE_DESCRIPTION(DRIVER_DESC);
102MODULE_LICENSE("GPL");
103
104static const char hcd_name[] = "isp1362-hcd";
105
106static void isp1362_hc_stop(struct usb_hcd *hcd);
107static int isp1362_hc_start(struct usb_hcd *hcd);
108
109/*-------------------------------------------------------------------------*/
110
/*
 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 * completion.
 * We don't need a 'disable' counterpart, since interrupts will be disabled
 * only by the interrupt handler.
 */
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	/* Nothing to do if all requested bits are already enabled */
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	/* Acknowledge any stale pending status for the bits being newly
	 * enabled, so we don't take a spurious interrupt immediately.
	 * NOTE(review): HCuPINT is presumably write-one-to-clear — confirm
	 * against the ISP1362 datasheet.
	 */
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	/* While the IRQ handler is running, defer the hardware update:
	 * the handler writes irqenb to HCuPINTENB on its way out.
	 */
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}
129
130/*-------------------------------------------------------------------------*/
131
132static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
133 u16 offset)
134{
135 struct isp1362_ep_queue *epq = NULL;
136
137 if (offset < isp1362_hcd->istl_queue[1].buf_start)
138 epq = &isp1362_hcd->istl_queue[0];
139 else if (offset < isp1362_hcd->intl_queue.buf_start)
140 epq = &isp1362_hcd->istl_queue[1];
141 else if (offset < isp1362_hcd->atl_queue.buf_start)
142 epq = &isp1362_hcd->intl_queue;
143 else if (offset < isp1362_hcd->atl_queue.buf_start +
144 isp1362_hcd->atl_queue.buf_size)
145 epq = &isp1362_hcd->atl_queue;
146
147 if (epq)
148 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
149 else
150 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
151
152 return epq;
153}
154
155static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
156{
157 int offset;
158
159 if (index * epq->blk_size > epq->buf_size) {
160 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
161 epq->buf_size / epq->blk_size);
162 return -EINVAL;
163 }
164 offset = epq->buf_start + index * epq->blk_size;
165 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
166
167 return offset;
168}
169
170/*-------------------------------------------------------------------------*/
171
172static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
173 int mps)
174{
175 u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
176
177 xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
178 if (xfer_size < size && xfer_size % mps)
179 xfer_size -= xfer_size % mps;
180
181 return xfer_size;
182}
183
/*
 * Reserve a contiguous run of PTD blocks in @epq large enough to hold one
 * PTD header plus @len payload bytes for endpoint @ep.
 *
 * Returns the index of the first claimed block, -ENOMEM if the queue has
 * no free blocks at all, or -EOVERFLOW if no contiguous run of the
 * required length exists.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* blocks needed for header + payload; the '+ 1' accounts for the
	 * header being part of the first block
	 */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* an endpoint must never hold two claims at once */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	/* find a contiguous run of num_ptds zero bits in the allocation map */
	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	/* record the claim on the endpoint and account for it in the queue */
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
222
/*
 * Return the PTD blocks previously claimed by @ep to the queue @epq:
 * clear them in the allocation map, mark them in the skip map so the
 * controller no longer processes them, and reset the endpoint's claim
 * bookkeeping.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int last = ep->ptd_index + ep->num_ptds;

	/* claim must lie entirely within the queue's block range */
	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		    epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
	/* skipped blocks are ignored by the controller until re-enabled */
	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* mark the endpoint as holding no claim */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
252
253/*-------------------------------------------------------------------------*/
254
/*
 * Set up PTD's.
 *
 * Fill in @ep->ptd (header fields) and @ep->data / @ep->length for the
 * next transaction of @urb, based on the endpoint's next PID state
 * (IN/OUT/SETUP/ACK).  @fno is the frame number, used only for
 * isochronous transfers.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still to be transferred */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default data pointer: continue where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			/* ISO: one frame descriptor per PTD */
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP always uses DATA0 and carries the 8-byte request */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* status stage: zero-length packet in the opposite direction
		 * of the data stage, always DATA1
		 */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	/* encode the PTD header fields for the controller */
	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		/* interrupt: starting frame and polling rate (log2 interval) */
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
342
/*
 * Copy the prepared PTD header (and, for OUT/SETUP directions, its
 * payload) from @ep into the chip buffer at @ep->ptd_offset.  For IN
 * transfers only the header is written; the payload area is filled by
 * the controller.
 */
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	/* no payload to send for IN transfers */
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	prefetch(ptd);
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}
358
/*
 * Read back a completed PTD from the chip buffer into @ep->ptd, remove
 * @ep from the queue's active list, and for IN transfers copy exactly
 * the number of bytes the controller actually received into @ep->data.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* nothing more to do unless the controller received IN data */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
390
/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)

{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* all PTDs skipped: the whole queue can be deactivated */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
438
/*
 * Take done or failed requests out of schedule. Give back
 * processed urbs.
 *
 * Called with isp1362_hcd->lock held; the lock is dropped around
 * usb_hcd_giveback_urb() (see the __releases/__acquires annotations)
 * because the URB completion callback may resubmit and re-enter the HCD.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* a finished control transfer restarts at the SETUP stage */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
	    ep->num_req, usb_pipedevice(urb->pipe),
	    usb_pipeendpoint(urb->pipe),
	    !usb_pipein(urb->pipe) ? "out" : "in",
	    usb_pipecontrol(urb->pipe) ? "ctrl" :
	    usb_pipeint(urb->pipe) ? "int" :
	    usb_pipebulk(urb->pipe) ? "bulk" :
	    "iso",
	    urb->actual_length, urb->transfer_buffer_length,
	    !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
	    "short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule: give the bandwidth back to its branch */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
493
/*
 * Analyze transfer results, handle partial transfers and errors
 *
 * Examines the completion code and byte count of @ep's just-read-back PTD,
 * advances the endpoint's PID state machine (data stage -> status stage for
 * control transfers, toggle bookkeeping for bulk/int) and, once the URB is
 * complete or has failed, hands it back via finish_request().
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	/* -EINPROGRESS means "URB not finished yet" throughout this function */
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	/* controller never touched the PTD: treat as "device not responding" */
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			/* save the data underrun error code for later and
			 * proceed with the status stage
			 */
			urb->actual_length += PTD_GET_COUNT(ptd);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* fatal errors (STALL, overrun) or three strikes finish the URB;
		 * other errors are retried silently
		 */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* URB_ZERO_PACKET on an exact multiple of
				 * maxpacket requires one more (empty) OUT PTD
				 */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* choose the next stage: straight to status for no-data
		 * control transfers, otherwise the DATA stage with DATA1
		 */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		/* status stage done: the control transfer is complete */
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
650
/*
 * Process the HCD's remove_list (endpoints queued by remove_ptd()):
 * release their PTD buffers, complete any still-pending URB with
 * -ESHUTDOWN, and drop them from the active and remove lists.
 * Invoked from the SOF interrupt path once skipped PTDs are inert.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
684
/*
 * Kick off processing of @count submitted ATL PTDs: program the done
 * threshold, enable the ATL interrupt, apply the current skip map and
 * activate the ATL buffer.  With count == 0 (deferred submission), only
 * arm the SOF interrupt so submission is retried next frame.
 */
static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
{
	if (count > 0) {
		/* interrupt only after 'count' PTDs are done (if fewer than
		 * the queue's total)
		 */
		if (count < isp1362_hcd->atl_queue.ptd_count)
			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else
		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
}
696
/*
 * Activate the INTL (interrupt transfer) buffer: enable its interrupt,
 * mark the buffer active and apply the current skip map.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
703
704static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
705{
706 isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
707 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
708 HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
709}
710
/*
 * Prepare and submit one PTD for @ep on queue @epq: build the PTD for the
 * URB's next transaction, claim buffer space, write the PTD to the chip
 * and un-skip its slot.
 *
 * Returns 0 on success, -ENOMEM if the queue has no free blocks, or
 * -EOVERFLOW if no contiguous run large enough exists (caller may retry).
 */
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	int index = epq->free_ptd;

	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);
	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);
	isp1362_write_ptd(isp1362_hcd, ep, epq);
	/* clear the skip bit so the controller processes the new PTD */
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}
739
/*
 * Walk the async (control/bulk) schedule and submit a PTD for every
 * endpoint that is not already active, then activate the ATL queue.
 * Submission is deferred (retried at next SOF) when buffer space runs out.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			/* queue exhausted: stop and retry at next SOF */
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			/* no contiguous run for this ep; a smaller one may fit */
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
792
/*
 * Walk the periodic (interrupt) schedule and submit a PTD for every
 * endpoint that is not already active, then activate the INTL queue if
 * anything was submitted.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		static int last_count;

		/* only log when the submitted count changes */
		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
838
839static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
840{
841 u16 ptd_offset = ep->ptd_offset;
842 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
843
844 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
845 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
846
847 ptd_offset += num_ptds * epq->blk_size;
848 if (ptd_offset < epq->buf_start + epq->buf_size)
849 return ptd_offset;
850 else
851 return -ENOMEM;
852}
853
854static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
855{
856 int ptd_count = 0;
857 int flip = isp1362_hcd->istl_flip;
858 struct isp1362_ep_queue *epq;
859 int ptd_offset;
860 struct isp1362_ep *ep;
861 struct isp1362_ep *tmp;
862 u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
863
864 fill2:
865 epq = &isp1362_hcd->istl_queue[flip];
866 if (atomic_read(&epq->finishing)) {
867 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
868 return;
869 }
870
871 if (!list_empty(&epq->active))
872 return;
873
874 ptd_offset = epq->buf_start;
875 list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
876 struct urb *urb = get_urb(ep);
877 s16 diff = fno - (u16)urb->start_frame;
878
879 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
880
881 if (diff > urb->number_of_packets) {
882 /* time frame for this URB has elapsed */
883 finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
884 continue;
885 } else if (diff < -1) {
886 /* URB is not due in this frame or the next one.
887 * Comparing with '-1' instead of '0' accounts for double
888 * buffering in the ISP1362 which enables us to queue the PTD
889 * one frame ahead of time
890 */
891 } else if (diff == -1) {
892 /* submit PTD's that are due in the next frame */
893 prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
894 if (ptd_offset + PTD_HEADER_SIZE + ep->length >
895 epq->buf_start + epq->buf_size) {
896 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
897 __func__, ep->length);
898 continue;
899 }
900 ep->ptd_offset = ptd_offset;
901 list_add_tail(&ep->active, &epq->active);
902
903 ptd_offset = next_ptd(epq, ep);
904 if (ptd_offset < 0) {
905 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
906 ep->num_req, epq->name);
907 break;
908 }
909 }
910 }
911 list_for_each_entry(ep, &epq->active, active) {
912 if (epq->active.next == &ep->active)
913 ep->ptd.mps |= PTD_LAST_MSK;
914 isp1362_write_ptd(isp1362_hcd, ep, epq);
915 ptd_count++;
916 }
917
918 if (ptd_count)
919 enable_istl_transfers(isp1362_hcd, flip);
920
921 epq->ptd_count += ptd_count;
922 if (epq->ptd_count > epq->stat_maxptds)
923 epq->stat_maxptds = epq->ptd_count;
924
925 /* check, whether the second ISTL buffer may also be filled */
926 if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
927 (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
928 fno++;
929 ptd_count = 0;
930 flip = 1 - flip;
931 goto fill2;
932 }
933}
934
/*
 * Retire completed PTDs of an ATL or INTL queue.
 * @done_map: bitmap of PTD slots flagged as done by the controller
 *            (HCATLDONE/HCINTLDONE); bits are consumed one by one as the
 *            corresponding endpoints are processed.
 *
 * For every active endpoint whose PTD slot bit is set in @done_map the
 * PTD is read back from chip memory, its buffer space is released and
 * postproc_ep() evaluates the transfer result.  Called with the HCD
 * spinlock held (from the IRQ handler or bus-suspend path).
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	/* mark the queue busy for the duration of the retirement pass */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			/* a pending unlink request is satisfied by completion */
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* stop early once every done bit has been consumed */
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
			   epq->skip_map);
	atomic_dec(&epq->finishing);
}
980
/*
 * Retire all active PTDs of an ISTL (isochronous) queue.
 *
 * Unlike finish_transfers() there is no done map: when an ISTL buffer
 * completes, every PTD queued in it has been processed, so each active
 * endpoint's PTD is read back and post-processed unconditionally.
 * Called with the HCD spinlock held.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	/* mark the queue busy for the duration of the retirement pass */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* all buffer space must have been released by now */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1004
/*
 * Top-level interrupt handler for the ISP1362 host controller.
 *
 * All chip interrupt sources are masked on entry (HCuPINTENB = 0) and
 * re-enabled from the software copy isp1362_hcd->irqenb on exit.  Only
 * sources currently enabled in irqenb are serviced; svc_mask tracks
 * signalled-but-unserviced bits so they can be reported at the end.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	/* the handler must never be re-entered */
	BUG_ON(isp1362_hcd->irq_active++);

	/* mask all chip interrupts while servicing */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	svc_mask = irqstat;

	/* Start-of-frame: run deferred unlinks and (re)start ATL processing */
	if (irqstat & HCuPINT_SOF) {
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* PTDs still in flight: re-arm the ATL machinery */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	/* ISTL0 buffer completed; must only happen when istl_flip == 0 */
	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	/* ISTL1 buffer completed; must only happen when istl_flip == 1 */
	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	/* finish the completed ISTL buffer and flip to the other one;
	 * both completing at once indicates lost synchronization */
	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	/* interrupt-transfer PTDs done */
	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	/* async (control/bulk) PTDs done */
	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	/* OHCI-level ("operational") events: UE, root-hub change, resume */
	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* cache root-hub status for the hub_status_data path */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		/* acknowledge the serviced OHCI interrupts */
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	/* clock became ready after wakeup; one-shot, so disable it again */
	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* re-enable interrupts from the (possibly updated) software mask */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
1178
1179/*-------------------------------------------------------------------------*/
1180
1181#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
1182static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1183{
1184 int i, branch = -ENOSPC;
1185
1186 /* search for the least loaded schedule branch of that interval
1187 * which has enough bandwidth left unreserved.
1188 */
1189 for (i = 0; i < interval; i++) {
1190 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1191 int j;
1192
1193 for (j = i; j < PERIODIC_SIZE; j += interval) {
1194 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1195 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1196 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1197 break;
1198 }
1199 }
1200 if (j < PERIODIC_SIZE)
1201 continue;
1202 branch = i;
1203 }
1204 }
1205 return branch;
1206}
1207
1208/* NB! ALL the code above this point runs with isp1362_hcd->lock
1209 held, irqs off
1210*/
1211
1212/*-------------------------------------------------------------------------*/
1213
/*
 * hc_driver .urb_enqueue: accept an URB for transfer.
 *
 * Allocates a per-endpoint context on first use, links the URB through
 * usb_hcd_link_urb_to_ep(), places the endpoint on the matching
 * schedule list (async / periodic / isoc) and kicks the corresponding
 * transfer engine.  Isochronous URBs are rejected up front (-ENOSPC).
 * Returns 0 on success or a negative errno.
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
			usb_pipeint(pipe) ? "int" :
			usb_pipebulk(pipe) ? "bulk" :
			"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	/* first URB for this endpoint: initialize the new context */
	if (hep->hcpriv) {
		ep = hep->hcpriv;
	} else {
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			/* reserve bandwidth: load is usec per serviced frame */
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		/* find a periodic-schedule branch with enough bandwidth */
		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* pick a start frame aligned to interval + branch */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the transfer engine matching this pipe type */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1394
/*
 * hc_driver .urb_dequeue: cancel a previously submitted URB.
 *
 * If the URB heads its endpoint's queue and a PTD is in flight, the PTD
 * is queued for removal and completion is deferred to the interrupt
 * handler; otherwise the request is finished immediately with @status.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				/* NULL means: completion is deferred to the IRQ path */
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1446
/*
 * hc_driver .endpoint_disable: release per-endpoint state.
 *
 * If a PTD is still in flight it is queued for removal and the function
 * waits for the interrupt handler to drain the active list before
 * freeing the endpoint context.
 * NOTE(review): the msleep() wait below is unbounded — it relies on the
 * IRQ handler eventually emptying ep->active; confirm there is no path
 * where that never happens.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1476
1477static int isp1362_get_frame(struct usb_hcd *hcd)
1478{
1479 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1480 u32 fmnum;
1481 unsigned long flags;
1482
1483 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1484 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1485 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1486
1487 return (int)fmnum;
1488}
1489
1490/*-------------------------------------------------------------------------*/
1491
1492/* Adapted from ohci-hub.c */
1493static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1494{
1495 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1496 int ports, i, changed = 0;
1497 unsigned long flags;
1498
1499 if (!HC_IS_RUNNING(hcd->state))
1500 return -ESHUTDOWN;
1501
1502 /* Report no status change now, if we are scheduled to be
1503 called later */
1504 if (timer_pending(&hcd->rh_timer))
1505 return 0;
1506
1507 ports = isp1362_hcd->rhdesca & RH_A_NDP;
1508 BUG_ON(ports > 2);
1509
1510 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1511 /* init status */
1512 if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1513 buf[0] = changed = 1;
1514 else
1515 buf[0] = 0;
1516
1517 for (i = 0; i < ports; i++) {
1518 u32 status = isp1362_hcd->rhport[i];
1519
1520 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1521 RH_PS_OCIC | RH_PS_PRSC)) {
1522 changed = 1;
1523 buf[0] |= 1 << (i + 1);
1524 continue;
1525 }
1526
1527 if (!(status & RH_PS_CCS))
1528 continue;
1529 }
1530 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1531 return changed;
1532}
1533
1534static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1535 struct usb_hub_descriptor *desc)
1536{
1537 u32 reg = isp1362_hcd->rhdesca;
1538
1539 DBG(3, "%s: enter\n", __func__);
1540
1541 desc->bDescriptorType = 0x29;
1542 desc->bDescLength = 9;
1543 desc->bHubContrCurrent = 0;
1544 desc->bNbrPorts = reg & 0x3;
1545 /* Power switching, device type, overcurrent. */
1546 desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
1547 DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
1548 desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
Sarah Sharpda130512010-11-30 15:55:51 -08001549 /* ports removable, and legacy PortPwrCtrlMask */
John Youndbe79bb2001-09-17 00:00:00 -07001550 desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1551 desc->u.hs.DeviceRemovable[1] = ~0;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001552
1553 DBG(3, "%s: exit\n", __func__);
1554}
1555
1556/* Adapted from ohci-hub.c */
1557static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1558 u16 wIndex, char *buf, u16 wLength)
1559{
1560 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1561 int retval = 0;
1562 unsigned long flags;
1563 unsigned long t1;
1564 int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1565 u32 tmp = 0;
1566
1567 switch (typeReq) {
1568 case ClearHubFeature:
1569 DBG(0, "ClearHubFeature: ");
1570 switch (wValue) {
1571 case C_HUB_OVER_CURRENT:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001572 DBG(0, "C_HUB_OVER_CURRENT\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001573 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1574 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1575 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1576 case C_HUB_LOCAL_POWER:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001577 DBG(0, "C_HUB_LOCAL_POWER\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001578 break;
1579 default:
1580 goto error;
1581 }
1582 break;
1583 case SetHubFeature:
1584 DBG(0, "SetHubFeature: ");
1585 switch (wValue) {
1586 case C_HUB_OVER_CURRENT:
1587 case C_HUB_LOCAL_POWER:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001588 DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001589 break;
1590 default:
1591 goto error;
1592 }
1593 break;
1594 case GetHubDescriptor:
1595 DBG(0, "GetHubDescriptor\n");
1596 isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1597 break;
1598 case GetHubStatus:
1599 DBG(0, "GetHubStatus\n");
1600 put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1601 break;
1602 case GetPortStatus:
1603#ifndef VERBOSE
1604 DBG(0, "GetPortStatus\n");
1605#endif
1606 if (!wIndex || wIndex > ports)
1607 goto error;
1608 tmp = isp1362_hcd->rhport[--wIndex];
1609 put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1610 break;
1611 case ClearPortFeature:
1612 DBG(0, "ClearPortFeature: ");
1613 if (!wIndex || wIndex > ports)
1614 goto error;
1615 wIndex--;
1616
1617 switch (wValue) {
1618 case USB_PORT_FEAT_ENABLE:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001619 DBG(0, "USB_PORT_FEAT_ENABLE\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001620 tmp = RH_PS_CCS;
1621 break;
1622 case USB_PORT_FEAT_C_ENABLE:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001623 DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001624 tmp = RH_PS_PESC;
1625 break;
1626 case USB_PORT_FEAT_SUSPEND:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001627 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001628 tmp = RH_PS_POCI;
1629 break;
1630 case USB_PORT_FEAT_C_SUSPEND:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001631 DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001632 tmp = RH_PS_PSSC;
1633 break;
1634 case USB_PORT_FEAT_POWER:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001635 DBG(0, "USB_PORT_FEAT_POWER\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001636 tmp = RH_PS_LSDA;
1637
1638 break;
1639 case USB_PORT_FEAT_C_CONNECTION:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001640 DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001641 tmp = RH_PS_CSC;
1642 break;
1643 case USB_PORT_FEAT_C_OVER_CURRENT:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001644 DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001645 tmp = RH_PS_OCIC;
1646 break;
1647 case USB_PORT_FEAT_C_RESET:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001648 DBG(0, "USB_PORT_FEAT_C_RESET\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001649 tmp = RH_PS_PRSC;
1650 break;
1651 default:
1652 goto error;
1653 }
1654
1655 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1656 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1657 isp1362_hcd->rhport[wIndex] =
1658 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1659 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1660 break;
1661 case SetPortFeature:
1662 DBG(0, "SetPortFeature: ");
1663 if (!wIndex || wIndex > ports)
1664 goto error;
1665 wIndex--;
1666 switch (wValue) {
1667 case USB_PORT_FEAT_SUSPEND:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001668 DBG(0, "USB_PORT_FEAT_SUSPEND\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001669 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1670 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1671 isp1362_hcd->rhport[wIndex] =
1672 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1673 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1674 break;
1675 case USB_PORT_FEAT_POWER:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001676 DBG(0, "USB_PORT_FEAT_POWER\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001677 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1678 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1679 isp1362_hcd->rhport[wIndex] =
1680 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1681 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1682 break;
1683 case USB_PORT_FEAT_RESET:
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001684 DBG(0, "USB_PORT_FEAT_RESET\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001685 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1686
1687 t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1688 while (time_before(jiffies, t1)) {
1689 /* spin until any current reset finishes */
1690 for (;;) {
1691 tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1692 if (!(tmp & RH_PS_PRS))
1693 break;
1694 udelay(500);
1695 }
1696 if (!(tmp & RH_PS_CCS))
1697 break;
1698 /* Reset lasts 10ms (claims datasheet) */
1699 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1700
1701 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1702 msleep(10);
1703 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1704 }
1705
1706 isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1707 HCRHPORT1 + wIndex);
1708 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1709 break;
1710 default:
1711 goto error;
1712 }
1713 break;
1714
1715 default:
1716 error:
1717 /* "protocol stall" on error */
Greg Kroah-Hartman374f4bf2013-06-28 11:33:00 -07001718 DBG(0, "PROTOCOL STALL\n");
Lothar Wassmanna9d43092009-07-16 20:51:21 -04001719 retval = -EPIPE;
1720 }
1721
1722 return retval;
1723}
1724
1725#ifdef CONFIG_PM
/*
 * hc_driver .bus_suspend: put the root hub into USB suspend.
 *
 * Drains all active transfer queues (ATL, INTL, both ISTL buffers),
 * then writes OHCI_USB_SUSPEND into HCCONTROL and verifies that the
 * controller accepted the state change.  Returns 0 on success or
 * -EBUSY if the controller needs a reinit / refuses to suspend.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* FALL THROUGH */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warning("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warning("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
			!list_empty(&isp1362_hcd->intl_queue.active) ||
			!list_empty(&isp1362_hcd->istl_queue[0] .active) ||
			!list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip all pending PTDs, mask interrupts, wait for SOF */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* retire whatever completed before the queues went quiet */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
	    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* acknowledge all pending OHCI interrupts */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* verify that the controller really entered the suspend state */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		    isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
1823
/*
 * hc_driver .bus_resume: wake the root hub from USB suspend.
 *
 * Initiates the resume signalling depending on the current HCFS state,
 * restarts the controller entirely if it lost power (RESET state),
 * resumes each suspended port and finally switches HCFS to
 * OHCI_USB_OPER.  Returns 0 on success or a negative errno.
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;

	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warning("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:		/* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		/* controller lost its state: full stop/start cycle */
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1903#else
1904#define isp1362_bus_suspend NULL
1905#define isp1362_bus_resume NULL
1906#endif
1907
1908/*-------------------------------------------------------------------------*/
1909
1910#ifdef STUB_DEBUG_FILE
1911
/* No-op stub used when STUB_DEBUG_FILE disables the procfs debug entry. */
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
/* No-op stub used when STUB_DEBUG_FILE disables the procfs debug entry. */
static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
}
1918
1919#else
1920
1921#include <linux/proc_fs.h>
1922#include <linux/seq_file.h>
1923
1924static void dump_irq(struct seq_file *s, char *label, u16 mask)
1925{
1926 seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1927 mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1928 mask & HCuPINT_SUSP ? " susp" : "",
1929 mask & HCuPINT_OPR ? " opr" : "",
1930 mask & HCuPINT_EOT ? " eot" : "",
1931 mask & HCuPINT_ATL ? " atl" : "",
1932 mask & HCuPINT_SOF ? " sof" : "");
1933}
1934
1935static void dump_int(struct seq_file *s, char *label, u32 mask)
1936{
1937 seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1938 mask & OHCI_INTR_MIE ? " MIE" : "",
1939 mask & OHCI_INTR_RHSC ? " rhsc" : "",
1940 mask & OHCI_INTR_FNO ? " fno" : "",
1941 mask & OHCI_INTR_UE ? " ue" : "",
1942 mask & OHCI_INTR_RD ? " rd" : "",
1943 mask & OHCI_INTR_SF ? " sof" : "",
1944 mask & OHCI_INTR_SO ? " so" : "");
1945}
1946
1947static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1948{
1949 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1950 mask & OHCI_CTRL_RWC ? " rwc" : "",
1951 mask & OHCI_CTRL_RWE ? " rwe" : "",
1952 ({
1953 char *hcfs;
1954 switch (mask & OHCI_CTRL_HCFS) {
1955 case OHCI_USB_OPER:
1956 hcfs = " oper";
1957 break;
1958 case OHCI_USB_RESET:
1959 hcfs = " reset";
1960 break;
1961 case OHCI_USB_RESUME:
1962 hcfs = " resume";
1963 break;
1964 case OHCI_USB_SUSPEND:
1965 hcfs = " suspend";
1966 break;
1967 default:
1968 hcfs = " ?";
1969 }
1970 hcfs;
1971 }));
1972}
1973
/*
 * Dump every readable controller register to the seq_file: first the
 * OHCI-compatible 32-bit registers, then the ISP1362-specific 16-bit
 * configuration/buffer registers.  Caller must hold isp1362_hcd->lock
 * (register access is paired address/data cycles).
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* OHCI-compatible register set */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* ISP1362-specific configuration registers (16 bit) */
	seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* reading HCDIRDATA is left disabled: it is a data-port access, not
	 * a plain status read */
	seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* INTL (interrupt transfer) buffer registers */
	seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* ATL (async transfer) buffer registers */
	seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2065
/*
 * seq_file show routine for /proc/driver/isp1362: dumps driver statistics,
 * interrupt counters, all controller registers and the async/periodic/iso
 * endpoint schedules.  Takes isp1362_hcd->lock for the register section.
 */
static int proc_isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);

	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-source interrupt counters (only non-zero ones) */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	/* async (control/bulk) schedule with the pending URBs per endpoint */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		/* NOTE(review): the inner 'char *s' shadows the seq_file 's';
		 * the shadowing is confined to this statement expression. */
		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				char *s;
				switch (ep->nextpid) {
				case USB_PID_IN:
					s = "in";
					break;
				case USB_PID_OUT:
					s = "out";
					break;
				case USB_PID_SETUP:
					s = "setup";
					break;
				case USB_PID_ACK:
					s = "status";
					break;
				default:
					s = "?";
					break;
				};
				s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* periodic (interrupt) schedule */
	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	/* isochronous schedule */
	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
2169
2170static int proc_isp1362_open(struct inode *inode, struct file *file)
2171{
Al Virod9dda782013-03-31 18:16:14 -04002172 return single_open(file, proc_isp1362_show, PDE_DATA(inode));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002173}
2174
2175static const struct file_operations proc_ops = {
2176 .open = proc_isp1362_open,
2177 .read = seq_read,
2178 .llseek = seq_lseek,
2179 .release = single_release,
2180};
2181
/* Path below /proc; we expect just one isp1362_hcd per system */
static const char proc_filename[] = "driver/isp1362";
2184
2185static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2186{
2187 struct proc_dir_entry *pde;
2188
Al Viro96e7d912013-03-30 13:15:27 -04002189 pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, isp1362_hcd);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002190 if (pde == NULL) {
2191 pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2192 return;
2193 }
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002194 isp1362_hcd->pde = pde;
2195}
2196
2197static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2198{
2199 if (isp1362_hcd->pde)
Randy Dunlap326b4812010-04-19 08:53:50 -07002200 remove_proc_entry(proc_filename, NULL);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002201}
2202
2203#endif
2204
2205/*-------------------------------------------------------------------------*/
2206
Jiri Slaby1c815572010-06-21 17:02:51 +02002207static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002208{
2209 int tmp = 20;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002210
2211 isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2212 isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2213 while (--tmp) {
2214 mdelay(1);
2215 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2216 break;
2217 }
2218 if (!tmp)
2219 pr_err("Software reset timeout\n");
Jiri Slaby1c815572010-06-21 17:02:51 +02002220}
2221
2222static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2223{
2224 unsigned long flags;
2225
2226 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2227 __isp1362_sw_reset(isp1362_hcd);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002228 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2229}
2230
/*
 * Partition the ISP1362's internal FIFO memory between the ISTL
 * (isochronous, two ping-pong halves), INTL (interrupt) and ATL (async)
 * areas, and program the corresponding buffer/block-size registers.
 * Returns 0 on success, -ENOMEM if the compile-time sizes exceed
 * ISP1362_BUF_SIZE.
 */
static int isp1362_mem_config(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 total;
	u16 istl_size = ISP1362_ISTL_BUFSIZE;
	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
	/* whatever remains after ISTL and INTL is given to the ATL area */
	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
	u16 atl_size;
	int i;

	WARN_ON(istl_size & 3);
	WARN_ON(atl_blksize & 3);
	WARN_ON(intl_blksize & 3);
	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
	WARN_ON(intl_blksize < PTD_HEADER_SIZE);

	/* the skip/last registers hold one bit per buffer, so cap at 32 */
	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
	if (atl_buffers > 32)
		atl_buffers = 32;
	atl_size = atl_buffers * atl_blksize;
	total = atl_size + intl_size + istl_size;
	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
		 istl_size / 2, istl_size, 0, istl_size / 2);
	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
		 intl_size, istl_size);
	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
		 atl_size, istl_size + intl_size);
	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
		 ISP1362_BUF_SIZE - total);

	if (total > ISP1362_BUF_SIZE) {
		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
			__func__, total, ISP1362_BUF_SIZE);
		return -ENOMEM;
	}

	total = istl_size + intl_size + atl_size;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* ISTL: two ping-pong halves at the start of chip memory */
	for (i = 0; i < 2; i++) {
		/* NOTE(review): the trailing ',' below is the comma operator
		 * and behaves like ';' here -- harmless but surprising. */
		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
		isp1362_hcd->istl_queue[i].blk_size = 4;
		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
		snprintf(isp1362_hcd->istl_queue[i].name,
			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
		     isp1362_hcd->istl_queue[i].name,
		     isp1362_hcd->istl_queue[i].buf_start,
		     isp1362_hcd->istl_queue[i].buf_size);
	}
	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);

	/* INTL area follows the ISTL buffers; all buffers marked "skip" */
	isp1362_hcd->intl_queue.buf_start = istl_size;
	isp1362_hcd->intl_queue.buf_size = intl_size;
	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
	isp1362_hcd->intl_queue.blk_size = intl_blksize;
	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
	isp1362_hcd->intl_queue.skip_map = ~0;
	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);

	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
			    isp1362_hcd->intl_queue.buf_size);
	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
			    1 << (ISP1362_INTL_BUFFERS - 1));

	/* ATL area follows the INTL buffers; all buffers marked "skip" */
	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
	isp1362_hcd->atl_queue.buf_size = atl_size;
	isp1362_hcd->atl_queue.buf_count = atl_buffers;
	isp1362_hcd->atl_queue.blk_size = atl_blksize;
	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
	isp1362_hcd->atl_queue.skip_map = ~0;
	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);

	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
			    isp1362_hcd->atl_queue.buf_size);
	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
			    1 << (atl_buffers - 1));

	snprintf(isp1362_hcd->atl_queue.name,
		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
	snprintf(isp1362_hcd->intl_queue.name,
		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
	    isp1362_hcd->intl_queue.name,
	    isp1362_hcd->intl_queue.buf_start,
	    ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
	    isp1362_hcd->intl_queue.buf_size);
	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
	    isp1362_hcd->atl_queue.name,
	    isp1362_hcd->atl_queue.buf_start,
	    atl_buffers, isp1362_hcd->atl_queue.blk_size,
	    isp1362_hcd->atl_queue.buf_size);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2341
/*
 * hc_driver .reset callback: reset the chip (via the board hook if one is
 * provided, otherwise by software reset) and wait up to 100ms for the
 * clock-ready interrupt status bit.  Returns 0 on success, -ENODEV if the
 * clock never comes up.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;	/* ms to wait for HCuPINT_CLKRDY */
	unsigned long flags;
	int clkrdy = 0;

	pr_debug("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* board provides a hardware reset line (and maybe a clock hook) */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the clock-ready interrupt (write-one-to-clear) */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
2381
/*
 * hc_driver .stop callback: mask all chip interrupts, power down the
 * root-hub ports, then hold the controller in reset and stop its clock
 * (via board hooks when available).
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_debug("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all microprocessor interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		__isp1362_sw_reset(isp1362_hcd);	/* lock already held */

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2413
2414#ifdef CHIP_BUFFER_TEST
/*
 * Optional self-test of the chip's internal buffer memory: write known
 * patterns through the FIFO interface at various sizes/offsets and read
 * them back.  Returns 0 if the memory checks out, -ENODEV otherwise.
 * Only compiled when CHIP_BUFFER_TEST is defined.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	/* first half holds the reference pattern, second half the readback */
	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (1..8 bytes) at all four byte alignments */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* full-size transfer of the whole buffer memory */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header writes at 256 different word offsets.
		 * NOTE(review): test_size stays 0 throughout, so only the
		 * PTD_HEADER_SIZE bytes are actually verified per offset. */
		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the read once before declaring failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
					   __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
2505#endif
2506
/*
 * hc_driver .start callback: verify the chip ID, program the hardware
 * configuration from platform data, partition the buffer memory, set up
 * the root hub and interrupt enables, and bring the controller to the
 * OPERATIONAL state with global port power enabled.
 * Returns 0 on success or a negative errno.
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf: build HCHWCFG from the board's platform data flags */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* carve up the internal FIFO memory (ISTL/INTL/ATL) */
	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf: power switching and power-on-to-good time */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2610
2611/*-------------------------------------------------------------------------*/
2612
/*
 * hc_driver operations table for the ISP1362: a USB 1.1 (full/low speed)
 * host whose transfer buffers live in chip memory (HCD_MEMORY).
 */
static struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	.irq = isp1362_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	/* lifecycle */
	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	/* transfer handling */
	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	/* root hub and power management */
	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};
2636
2637/*-------------------------------------------------------------------------*/
2638
Bill Pembertonfb4e98a2012-11-19 13:26:20 -05002639static int isp1362_remove(struct platform_device *pdev)
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002640{
2641 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2642 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2643 struct resource *res;
2644
2645 remove_debug_file(isp1362_hcd);
2646 DBG(0, "%s: Removing HCD\n", __func__);
2647 usb_remove_hcd(hcd);
2648
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002649 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2650 isp1362_hcd->data_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002651 iounmap(isp1362_hcd->data_reg);
2652
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002653 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2654 isp1362_hcd->addr_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002655 iounmap(isp1362_hcd->addr_reg);
2656
2657 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2658 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2659 if (res)
Axel Lin4e5c3532010-10-15 13:27:57 +08002660 release_mem_region(res->start, resource_size(res));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002661
2662 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2663 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2664 if (res)
Axel Lin4e5c3532010-10-15 13:27:57 +08002665 release_mem_region(res->start, resource_size(res));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002666
2667 DBG(0, "%s: put_hcd\n", __func__);
2668 usb_put_hcd(hcd);
2669 DBG(0, "%s: Done\n", __func__);
2670
2671 return 0;
2672}
2673
Bill Pemberton41ac7b32012-11-19 13:21:48 -05002674static int isp1362_probe(struct platform_device *pdev)
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002675{
2676 struct usb_hcd *hcd;
2677 struct isp1362_hcd *isp1362_hcd;
2678 struct resource *addr, *data;
2679 void __iomem *addr_reg;
2680 void __iomem *data_reg;
2681 int irq;
2682 int retval = 0;
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002683 struct resource *irq_res;
2684 unsigned int irq_flags = 0;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002685
Tobias Klauseraefd4922012-02-17 16:30:04 +01002686 if (usb_disabled())
2687 return -ENODEV;
2688
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002689 /* basic sanity checks first. board-specific init logic should
2690 * have initialized this the three resources and probably board
2691 * specific platform_data. we don't probe for IRQs, and do only
2692 * minimal sanity checking.
2693 */
2694 if (pdev->num_resources < 3) {
2695 retval = -ENODEV;
2696 goto err1;
2697 }
2698
2699 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2700 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002701 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2702 if (!addr || !data || !irq_res) {
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002703 retval = -ENODEV;
2704 goto err1;
2705 }
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002706 irq = irq_res->start;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002707
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002708 if (pdev->dev.dma_mask) {
2709 DBG(1, "won't do DMA");
2710 retval = -ENODEV;
2711 goto err1;
2712 }
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002713
Axel Lin4e5c3532010-10-15 13:27:57 +08002714 if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002715 retval = -EBUSY;
2716 goto err1;
2717 }
Axel Lin4e5c3532010-10-15 13:27:57 +08002718 addr_reg = ioremap(addr->start, resource_size(addr));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002719 if (addr_reg == NULL) {
2720 retval = -ENOMEM;
2721 goto err2;
2722 }
2723
Axel Lin4e5c3532010-10-15 13:27:57 +08002724 if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002725 retval = -EBUSY;
2726 goto err3;
2727 }
Axel Lin4e5c3532010-10-15 13:27:57 +08002728 data_reg = ioremap(data->start, resource_size(data));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002729 if (data_reg == NULL) {
2730 retval = -ENOMEM;
2731 goto err4;
2732 }
2733
2734 /* allocate and initialize hcd */
2735 hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2736 if (!hcd) {
2737 retval = -ENOMEM;
2738 goto err5;
2739 }
2740 hcd->rsrc_start = data->start;
2741 isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2742 isp1362_hcd->data_reg = data_reg;
2743 isp1362_hcd->addr_reg = addr_reg;
2744
2745 isp1362_hcd->next_statechange = jiffies;
2746 spin_lock_init(&isp1362_hcd->lock);
2747 INIT_LIST_HEAD(&isp1362_hcd->async);
2748 INIT_LIST_HEAD(&isp1362_hcd->periodic);
2749 INIT_LIST_HEAD(&isp1362_hcd->isoc);
2750 INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2751 isp1362_hcd->board = pdev->dev.platform_data;
2752#if USE_PLATFORM_DELAY
2753 if (!isp1362_hcd->board->delay) {
2754 dev_err(hcd->self.controller, "No platform delay function given\n");
2755 retval = -ENODEV;
2756 goto err6;
2757 }
2758#endif
2759
Lothar Wassmann0a2fea22010-01-15 14:42:02 -05002760 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2761 irq_flags |= IRQF_TRIGGER_RISING;
2762 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2763 irq_flags |= IRQF_TRIGGER_FALLING;
2764 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2765 irq_flags |= IRQF_TRIGGER_HIGH;
2766 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2767 irq_flags |= IRQF_TRIGGER_LOW;
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002768
Yong Zhangb5dd18d2011-09-07 16:10:52 +08002769 retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002770 if (retval != 0)
2771 goto err6;
2772 pr_info("%s, irq %d\n", hcd->product_desc, irq);
2773
2774 create_debug_file(isp1362_hcd);
2775
2776 return 0;
2777
2778 err6:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002779 DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002780 usb_put_hcd(hcd);
2781 err5:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002782 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002783 iounmap(data_reg);
2784 err4:
2785 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
Axel Lin4e5c3532010-10-15 13:27:57 +08002786 release_mem_region(data->start, resource_size(data));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002787 err3:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002788 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002789 iounmap(addr_reg);
2790 err2:
2791 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
Axel Lin4e5c3532010-10-15 13:27:57 +08002792 release_mem_region(addr->start, resource_size(addr));
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002793 err1:
2794 pr_err("%s: init error, %d\n", __func__, retval);
2795
2796 return retval;
2797}
2798
#ifdef CONFIG_PM
/*
 * Platform suspend callback.  A PM_EVENT_FREEZE suspends the root hub
 * through the bus-suspend path; any other event powers down the root
 * hub ports directly via the HCRHSTATUS register.
 */
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event != PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	} else {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	}

	/* record the new power state only on success */
	if (!retval)
		pdev->dev.power.power_state = state;
	return retval;
}

/*
 * Platform resume callback: undo whatever isp1362_suspend() did.
 * After a port power-down (PM_EVENT_SUSPEND) the ports are powered
 * back up directly; otherwise resume goes through the bus-resume path.
 */
static int isp1362_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(0, "%s: Resuming\n", __func__);

	if (pdev->dev.power.power_state.event != PM_EVENT_SUSPEND) {
		pdev->dev.power.power_state = PMSG_ON;
		return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
	}

	DBG(0, "%s: Resume RH ports\n", __func__);
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return 0;
}
#else
#define isp1362_suspend NULL
#define isp1362_resume NULL
#endif
2847
2848static struct platform_driver isp1362_driver = {
2849 .probe = isp1362_probe,
Bill Pemberton76904172012-11-19 13:21:08 -05002850 .remove = isp1362_remove,
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002851
2852 .suspend = isp1362_suspend,
2853 .resume = isp1362_resume,
2854 .driver = {
2855 .name = (char *)hcd_name,
2856 .owner = THIS_MODULE,
2857 },
2858};
2859
Tobias Klauseraefd4922012-02-17 16:30:04 +01002860module_platform_driver(isp1362_driver);