| 1 | /*
| 2 | * Copyright (c) 2001-2004 by David Brownell |
| 3 | * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License as published by the |
| 7 | * Free Software Foundation; either version 2 of the License, or (at your |
| 8 | * option) any later version. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, but |
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
| 12 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 13 | * for more details. |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software Foundation, |
| 17 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 18 | */ |
| 19 | |
| 20 | /* this file is part of ehci-hcd.c */ |
| 21 | |
| 22 | /*-------------------------------------------------------------------------*/ |
| 23 | |
| 24 | /* |
| 25 | * EHCI scheduled transaction support: interrupt, iso, split iso |
| 26 | * These are called "periodic" transactions in the EHCI spec. |
| 27 | * |
| 28 | * Note that for interrupt transfers, the QH/QTD manipulation is shared |
| 29 | * with the "asynchronous" transaction support (control/bulk transfers). |
| 30 | * The only real difference is in how interrupt transfers are scheduled. |
| 31 | * |
| 32 | * For ISO, we make an "iso_stream" head to serve the same role as a QH. |
| 33 | * It keeps track of every ITD (or SITD) that's linked, and holds enough |
| 34 | * pre-calculated schedule data to make appending to the queue be quick. |
| 35 | */ |
| 36 | |
| 37 | static int ehci_get_frame (struct usb_hcd *hcd); |
| 38 | |
| 39 | /*-------------------------------------------------------------------------*/ |
| 40 | |
| 41 | /* |
| 42 | * periodic_next_shadow - return "next" pointer on shadow list |
| 43 | * @periodic: host pointer to qh/itd/sitd |
| 44 | * @tag: hardware tag for type of this record |
| 45 | */ |
| 46 | static union ehci_shadow * |
| 47 | periodic_next_shadow (union ehci_shadow *periodic, __le32 tag) |
| 48 | { |
| 49 | switch (tag) { |
| 50 | case Q_TYPE_QH: |
| 51 | return &periodic->qh->qh_next; |
| 52 | case Q_TYPE_FSTN: |
| 53 | return &periodic->fstn->fstn_next; |
| 54 | case Q_TYPE_ITD: |
| 55 | return &periodic->itd->itd_next; |
| 56 | // case Q_TYPE_SITD: |
| 57 | default: |
| 58 | return &periodic->sitd->sitd_next; |
| 59 | } |
| 60 | } |
| 61 | |
| 62 | /* caller must hold ehci->lock */ |
| 63 | static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) |
| 64 | { |
| 65 | union ehci_shadow *prev_p = &ehci->pshadow [frame]; |
| 66 | __le32 *hw_p = &ehci->periodic [frame]; |
| 67 | union ehci_shadow here = *prev_p; |
| 68 | |
| 69 | /* find predecessor of "ptr"; hw and shadow lists are in sync */ |
| 70 | while (here.ptr && here.ptr != ptr) { |
| 71 | prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p)); |
| 72 | hw_p = here.hw_next; |
| 73 | here = *prev_p; |
| 74 | } |
| 75 | /* an interrupt entry (at list end) could have been shared */ |
| 76 | if (!here.ptr) |
| 77 | return; |
| 78 | |
| 79 | /* update shadow and hardware lists ... the old "next" pointers |
| 80 | * from ptr may still be in use, the caller updates them. |
| 81 | */ |
| 82 | *prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p)); |
| 83 | *hw_p = *here.hw_next; |
| 84 | } |
| 85 | |
| 86 | /* how many of the uframe's 125 usecs are allocated? */ |
| 87 | static unsigned short |
| 88 | periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) |
| 89 | { |
| 90 | __le32 *hw_p = &ehci->periodic [frame]; |
| 91 | union ehci_shadow *q = &ehci->pshadow [frame]; |
| 92 | unsigned usecs = 0; |
| 93 | |
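/* walk everything the HC sees in this frame, adding up the time each
 * entry (qh, fstn, itd, sitd) could claim in the requested uframe
 */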
| 94 | while (q->ptr) { |
| 95 | switch (Q_NEXT_TYPE (*hw_p)) { |
| 96 | case Q_TYPE_QH: |
| 97 | /* is it in the S-mask? */ |
| 98 | if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe)) |
| 99 | usecs += q->qh->usecs; |
| 100 | /* ... or C-mask? */ |
| 101 | if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe))) |
| 102 | usecs += q->qh->c_usecs; |
| 103 | hw_p = &q->qh->hw_next; |
| 104 | q = &q->qh->qh_next; |
| 105 | break; |
| 106 | // case Q_TYPE_FSTN: |
| 107 | default: |
| 108 | /* for "save place" FSTNs, count the relevant INTR |
| 109 | * bandwidth from the previous frame |
| 110 | */ |
| 111 | if (q->fstn->hw_prev != EHCI_LIST_END) { |
| 112 | ehci_dbg (ehci, "ignoring FSTN cost ...\n"); |
| 113 | } |
| 114 | hw_p = &q->fstn->hw_next; |
| 115 | q = &q->fstn->fstn_next; |
| 116 | break; |
| 117 | case Q_TYPE_ITD: |
| 118 | usecs += q->itd->usecs [uframe]; |
| 119 | hw_p = &q->itd->hw_next; |
| 120 | q = &q->itd->itd_next; |
| 121 | break; |
| 122 | case Q_TYPE_SITD: |
| 123 | /* is it in the S-mask? (count SPLIT, DATA) */ |
| 124 | if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) { |
| 125 | if (q->sitd->hw_fullspeed_ep & |
| 126 | __constant_cpu_to_le32 (1<<31)) |
| 127 | usecs += q->sitd->stream->usecs; |
| 128 | else /* worst case for OUT start-split */ |
| 129 | usecs += HS_USECS_ISO (188); |
| 130 | } |
| 131 | |
| 132 | /* ... C-mask? (count CSPLIT, DATA) */ |
| 133 | if (q->sitd->hw_uframe & |
| 134 | cpu_to_le32 (1 << (8 + uframe))) { |
| 135 | /* worst case for IN complete-split */ |
| 136 | usecs += q->sitd->stream->c_usecs; |
| 137 | } |
| 138 | |
| 139 | hw_p = &q->sitd->hw_next; |
| 140 | q = &q->sitd->sitd_next; |
| 141 | break; |
| 142 | } |
| 143 | } |
| 144 | #ifdef DEBUG |
| 145 | if (usecs > 100) |
| 146 | ehci_err (ehci, "uframe %d sched overrun: %d usecs\n", |
| 147 | frame * 8 + uframe, usecs); |
| 148 | #endif |
| 149 | return usecs; |
| 150 | } |
| 151 | |
| 152 | /*-------------------------------------------------------------------------*/ |
| 153 | |
| 154 | static int same_tt (struct usb_device *dev1, struct usb_device *dev2) |
| 155 | { |
| 156 | if (!dev1->tt || !dev2->tt) |
| 157 | return 0; |
| 158 | if (dev1->tt != dev2->tt) |
| 159 | return 0; |
| 160 | if (dev1->tt->multi) |
| 161 | return dev1->ttport == dev2->ttport; |
| 162 | else |
| 163 | return 1; |
| 164 | } |
| 165 | |
| 166 | /* return true iff the device's transaction translator is available |
| 167 | * for a periodic transfer starting at the specified frame, using |
| 168 | * all the uframes in the mask. |
| 169 | */ |
| 170 | static int tt_no_collision ( |
| 171 | struct ehci_hcd *ehci, |
| 172 | unsigned period, |
| 173 | struct usb_device *dev, |
| 174 | unsigned frame, |
| 175 | u32 uf_mask |
| 176 | ) |
| 177 | { |
| 178 | if (period == 0) /* error */ |
| 179 | return 0; |
| 180 | |
| 181 | /* note bandwidth wastage: split never follows csplit |
| 182 | * (different dev or endpoint) until the next uframe. |
| 183 | * calling convention doesn't make that distinction. |
| 184 | */ |
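/* scan every frame this transfer would use; any other periodic entry
 * behind the same TT whose uframe mask overlaps uf_mask is a collision
 */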
| 185 | for (; frame < ehci->periodic_size; frame += period) { |
| 186 | union ehci_shadow here; |
| 187 | __le32 type; |
| 188 | |
| 189 | here = ehci->pshadow [frame]; |
| 190 | type = Q_NEXT_TYPE (ehci->periodic [frame]); |
| 191 | while (here.ptr) { |
| 192 | switch (type) { |
| 193 | case Q_TYPE_ITD: |
| 194 | type = Q_NEXT_TYPE (here.itd->hw_next); |
| 195 | here = here.itd->itd_next; |
| 196 | continue; |
| 197 | case Q_TYPE_QH: |
| 198 | if (same_tt (dev, here.qh->dev)) { |
| 199 | u32 mask; |
| 200 | |
| 201 | mask = le32_to_cpu (here.qh->hw_info2); |
| 202 | /* "knows" no gap is needed */ |
| 203 | mask |= mask >> 8; |
| 204 | if (mask & uf_mask) |
| 205 | break; |
| 206 | } |
| 207 | type = Q_NEXT_TYPE (here.qh->hw_next); |
| 208 | here = here.qh->qh_next; |
| 209 | continue; |
| 210 | case Q_TYPE_SITD: |
| 211 | if (same_tt (dev, here.sitd->urb->dev)) { |
| 212 | u16 mask; |
| 213 | |
| 214 | mask = le32_to_cpu (here.sitd |
| 215 | ->hw_uframe); |
| 216 | /* FIXME assumes no gap for IN! */ |
| 217 | mask |= mask >> 8; |
| 218 | if (mask & uf_mask) |
| 219 | break; |
| 220 | } |
| 221 | type = Q_NEXT_TYPE (here.sitd->hw_next); |
| 222 | here = here.sitd->sitd_next; |
| 223 | continue; |
| 224 | // case Q_TYPE_FSTN: |
| 225 | default: |
| 226 | ehci_dbg (ehci, |
| 227 | "periodic frame %d bogus type %d\n", |
| 228 | frame, type); |
| 229 | } |
| 230 | |
| 231 | /* collision or error */ |
| 232 | return 0; |
| 233 | } |
| 234 | } |
| 235 | |
| 236 | /* no collision */ |
| 237 | return 1; |
| 238 | } |
| 239 | |
| 240 | /*-------------------------------------------------------------------------*/ |
| 241 | |
| 242 | static int enable_periodic (struct ehci_hcd *ehci) |
| 243 | { |
| 244 | u32 cmd; |
| 245 | int status; |
| 246 | |
| 247 | /* did clearing PSE take effect yet?
| 248 | * takes effect only at frame boundaries... |
| 249 | */ |
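/* wait up to nine uframes (just over one frame) for PSS to clear */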
| 250 | status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125); |
| 251 | if (status != 0) { |
| 252 | ehci_to_hcd(ehci)->state = HC_STATE_HALT; |
| 253 | return status; |
| 254 | } |
| 255 | |
| 256 | cmd = readl (&ehci->regs->command) | CMD_PSE; |
| 257 | writel (cmd, &ehci->regs->command); |
| 258 | /* posted write ... PSS happens later */ |
| 259 | ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; |
| 260 | |
| 261 | /* make sure ehci_work scans these */ |
| 262 | ehci->next_uframe = readl (&ehci->regs->frame_index) |
| 263 | % (ehci->periodic_size << 3); |
| 264 | return 0; |
| 265 | } |
| 266 | |
| 267 | static int disable_periodic (struct ehci_hcd *ehci) |
| 268 | { |
| 269 | u32 cmd; |
| 270 | int status; |
| 271 | |
| 272 | /* did setting PSE not take effect yet? |
| 273 | * takes effect only at frame boundaries... |
| 274 | */ |
| 275 | status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125); |
| 276 | if (status != 0) { |
| 277 | ehci_to_hcd(ehci)->state = HC_STATE_HALT; |
| 278 | return status; |
| 279 | } |
| 280 | |
| 281 | cmd = readl (&ehci->regs->command) & ~CMD_PSE; |
| 282 | writel (cmd, &ehci->regs->command); |
| 283 | /* posted write ... */ |
| 284 | |
| 285 | ehci->next_uframe = -1; |
| 286 | return 0; |
| 287 | } |
| 288 | |
| 289 | /*-------------------------------------------------------------------------*/ |
| 290 | |
| 291 | /* periodic schedule slots have iso tds (normal or split) first, then a |
| 292 | * sparse tree for active interrupt transfers. |
| 293 | * |
| 294 | * this just links in a qh; caller guarantees uframe masks are set right. |
| 295 | * no FSTN support (yet; ehci 0.96+) |
| 296 | */ |
| 297 | static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 298 | { |
| 299 | unsigned i; |
| 300 | unsigned period = qh->period; |
| 301 | |
| 302 | dev_dbg (&qh->dev->dev, |
| 303 | "link qh%d-%04x/%p start %d [%d/%d us]\n", |
| 304 | period, le32_to_cpup (&qh->hw_info2) & 0xffff, |
| 305 | qh, qh->start, qh->usecs, qh->c_usecs); |
| 306 | |
| 307 | /* high bandwidth, or otherwise every microframe */ |
| 308 | if (period == 0) |
| 309 | period = 1; |
| 310 | |
| 311 | for (i = qh->start; i < ehci->periodic_size; i += period) { |
| 312 | union ehci_shadow *prev = &ehci->pshadow [i]; |
| 313 | __le32 *hw_p = &ehci->periodic [i];
| 314 | union ehci_shadow here = *prev;
| 315 | __le32 type = 0;
| 316 | 
| 317 | /* skip the iso nodes at list head */ |
| 318 | while (here.ptr) { |
| 319 | type = Q_NEXT_TYPE (*hw_p); |
| 320 | if (type == Q_TYPE_QH) |
| 321 | break; |
| 322 | prev = periodic_next_shadow (prev, type); |
| 323 | hw_p = &here.qh->hw_next; |
| 324 | here = *prev; |
| 325 | } |
| 326 | |
| 327 | /* sorting each branch by period (slow-->fast) |
| 328 | * enables sharing interior tree nodes |
| 329 | */ |
| 330 | while (here.ptr && qh != here.qh) { |
| 331 | if (qh->period > here.qh->period) |
| 332 | break; |
| 333 | prev = &here.qh->qh_next; |
| 334 | hw_p = &here.qh->hw_next; |
| 335 | here = *prev; |
| 336 | } |
| 337 | /* link in this qh, unless some earlier pass did that */ |
| 338 | if (qh != here.qh) { |
| 339 | qh->qh_next = here; |
| 340 | if (here.qh) |
| 341 | qh->hw_next = *hw_p; |
| 342 | wmb (); |
| 343 | prev->qh = qh; |
| 344 | *hw_p = QH_NEXT (qh->qh_dma); |
| 345 | } |
| 346 | } |
| 347 | qh->qh_state = QH_STATE_LINKED; |
| 348 | qh_get (qh); |
| 349 | |
| 350 | /* update per-qh bandwidth for usbfs */ |
| 351 | ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period |
| 352 | ? ((qh->usecs + qh->c_usecs) / qh->period) |
| 353 | : (qh->usecs * 8); |
| 354 | |
| 355 | /* maybe enable periodic schedule processing */ |
| 356 | if (!ehci->periodic_sched++) |
| 357 | return enable_periodic (ehci); |
| 358 | |
| 359 | return 0; |
| 360 | } |
| 361 | |
| 362 | static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 363 | { |
| 364 | unsigned i; |
| 365 | unsigned period; |
| 366 | |
| 367 | // FIXME: |
| 368 | // IF this isn't high speed |
| 369 | // and this qh is active in the current uframe |
| 370 | // (and overlay token SplitXstate is false?) |
| 371 | // THEN |
| 372 | // qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */); |
| 373 | |
| 374 | /* high bandwidth, or otherwise part of every microframe */ |
| 375 | if ((period = qh->period) == 0) |
| 376 | period = 1; |
| 377 | |
| 378 | for (i = qh->start; i < ehci->periodic_size; i += period) |
| 379 | periodic_unlink (ehci, i, qh); |
| 380 | |
| 381 | /* update per-qh bandwidth for usbfs */ |
| 382 | ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period |
| 383 | ? ((qh->usecs + qh->c_usecs) / qh->period) |
| 384 | : (qh->usecs * 8); |
| 385 | |
| 386 | dev_dbg (&qh->dev->dev, |
| 387 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", |
| 388 | qh->period, le32_to_cpup (&qh->hw_info2) & 0xffff, |
| 389 | qh, qh->start, qh->usecs, qh->c_usecs); |
| 390 | |
| 391 | /* qh->qh_next still "live" to HC */ |
| 392 | qh->qh_state = QH_STATE_UNLINK; |
| 393 | qh->qh_next.ptr = NULL; |
| 394 | qh_put (qh); |
| 395 | |
| 396 | /* maybe turn off periodic schedule */ |
| 397 | ehci->periodic_sched--; |
| 398 | if (!ehci->periodic_sched) |
| 399 | (void) disable_periodic (ehci); |
| 400 | } |
| 401 | |
| 402 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 403 | { |
| 404 | unsigned wait; |
| 405 | |
| 406 | qh_unlink_periodic (ehci, qh); |
| 407 | |
| 408 | /* simple/paranoid: always delay, expecting the HC needs to read |
| 409 | * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and |
| 410 | * expect khubd to clean up after any CSPLITs we won't issue. |
| 411 | * active high speed queues may need bigger delays... |
| 412 | */ |
| 413 | if (list_empty (&qh->qtd_list) |
| 414 | || (__constant_cpu_to_le32 (0x0ff << 8) |
| 415 | & qh->hw_info2) != 0) |
| 416 | wait = 2; |
| 417 | else |
| 418 | wait = 55; /* worst case: 3 * 1024 */ |
| 419 | |
| 420 | udelay (wait); |
| 421 | qh->qh_state = QH_STATE_IDLE; |
| 422 | qh->hw_next = EHCI_LIST_END; |
| 423 | wmb (); |
| 424 | } |
| 425 | |
| 426 | /*-------------------------------------------------------------------------*/ |
| 427 | |
| 428 | static int check_period ( |
| 429 | struct ehci_hcd *ehci, |
| 430 | unsigned frame, |
| 431 | unsigned uframe, |
| 432 | unsigned period, |
| 433 | unsigned usecs |
| 434 | ) { |
| 435 | int claimed; |
| 436 | |
| 437 | /* complete split running into next frame? |
| 438 | * given FSTN support, we could sometimes check... |
| 439 | */ |
| 440 | if (uframe >= 8) |
| 441 | return 0; |
| 442 | |
| 443 | /* |
| 444 | * 80% periodic == 100 usec/uframe available |
| 445 | * convert "usecs we need" to "max already claimed" |
| 446 | */ |
| 447 | usecs = 100 - usecs; |
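/* e.g. needing 30 usecs means any uframe with more than 70 usecs
 * already claimed gets rejected below
 */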
| 448 | |
| 449 | /* we "know" 2 and 4 uframe intervals were rejected; so |
| 450 | * for period 0, check _every_ microframe in the schedule. |
| 451 | */ |
| 452 | if (unlikely (period == 0)) { |
| 453 | do { |
| 454 | for (uframe = 0; uframe < 7; uframe++) { |
| 455 | claimed = periodic_usecs (ehci, frame, uframe); |
| 456 | if (claimed > usecs) |
| 457 | return 0; |
| 458 | } |
| 459 | } while ((frame += 1) < ehci->periodic_size); |
| 460 | |
| 461 | /* just check the specified uframe, at that period */ |
| 462 | } else { |
| 463 | do { |
| 464 | claimed = periodic_usecs (ehci, frame, uframe); |
| 465 | if (claimed > usecs) |
| 466 | return 0; |
| 467 | } while ((frame += period) < ehci->periodic_size); |
| 468 | } |
| 469 | |
| 470 | // success! |
| 471 | return 1; |
| 472 | } |
| 473 | |
| 474 | static int check_intr_schedule ( |
| 475 | struct ehci_hcd *ehci, |
| 476 | unsigned frame, |
| 477 | unsigned uframe, |
| 478 | const struct ehci_qh *qh, |
| 479 | __le32 *c_maskp |
| 480 | ) |
| 481 | { |
| 482 | int retval = -ENOSPC; |
| 483 | u8 mask; |
| 484 | |
| 485 | if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ |
| 486 | goto done; |
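/* (a CSPLIT begun in uframe 6..7 would complete in the next frame,
 * which needs the FSTN support this driver doesn't provide)
 */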
| 487 | |
| 488 | if (!check_period (ehci, frame, uframe, qh->period, qh->usecs)) |
| 489 | goto done; |
| 490 | if (!qh->c_usecs) { |
| 491 | retval = 0; |
| 492 | *c_maskp = 0; |
| 493 | goto done; |
| 494 | } |
| 495 | |
| 496 | /* Make sure this tt's buffer is also available for CSPLITs. |
| 497 | * We pessimize a bit; probably the typical full speed case |
| 498 | * doesn't need the second CSPLIT. |
| 499 | * |
| 500 | * NOTE: both SPLIT and CSPLIT could be checked in just |
| 501 | * one smart pass... |
| 502 | */ |
| 503 | mask = 0x03 << (uframe + qh->gap_uf); |
| 504 | *c_maskp = cpu_to_le32 (mask << 8); |
| 505 | |
| 506 | mask |= 1 << uframe; |
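/* mask now covers the start-split uframe plus both candidate CSPLITs */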
| 507 | if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) { |
| 508 | if (!check_period (ehci, frame, uframe + qh->gap_uf + 1, |
| 509 | qh->period, qh->c_usecs)) |
| 510 | goto done; |
| 511 | if (!check_period (ehci, frame, uframe + qh->gap_uf, |
| 512 | qh->period, qh->c_usecs)) |
| 513 | goto done; |
| 514 | retval = 0; |
| 515 | } |
| 516 | done: |
| 517 | return retval; |
| 518 | } |
| 519 | |
| 520 | /* "first fit" scheduling policy used the first time through, |
| 521 | * or when the previous schedule slot can't be re-used. |
| 522 | */ |
| 523 | static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 524 | { |
| 525 | int status; |
| 526 | unsigned uframe; |
| 527 | __le32 c_mask; |
| 528 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ |
| 529 | |
| 530 | qh_refresh(ehci, qh); |
| 531 | qh->hw_next = EHCI_LIST_END; |
| 532 | frame = qh->start; |
| 533 | |
| 534 | /* reuse the previous schedule slots, if we can */ |
| 535 | if (frame < qh->period) { |
| 536 | uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff); |
| 537 | status = check_intr_schedule (ehci, frame, --uframe, |
| 538 | qh, &c_mask); |
| 539 | } else { |
| 540 | uframe = 0; |
| 541 | c_mask = 0; |
| 542 | status = -ENOSPC; |
| 543 | } |
| 544 | |
| 545 | /* else scan the schedule to find a group of slots such that all |
| 546 | * uframes have enough periodic bandwidth available. |
| 547 | */ |
| 548 | if (status) { |
| 549 | /* "normal" case, uframing flexible except with splits */ |
| 550 | if (qh->period) { |
| 551 | frame = qh->period - 1; |
| 552 | do { |
| 553 | for (uframe = 0; uframe < 8; uframe++) { |
| 554 | status = check_intr_schedule (ehci, |
| 555 | frame, uframe, qh, |
| 556 | &c_mask); |
| 557 | if (status == 0) |
| 558 | break; |
| 559 | } |
| 560 | } while (status && frame--); |
| 561 | |
| 562 | /* qh->period == 0 means every uframe */ |
| 563 | } else { |
| 564 | frame = 0; |
| 565 | status = check_intr_schedule (ehci, 0, 0, qh, &c_mask); |
| 566 | } |
| 567 | if (status) |
| 568 | goto done; |
| 569 | qh->start = frame; |
| 570 | |
| 571 | /* reset S-frame and (maybe) C-frame masks */ |
| 572 | qh->hw_info2 &= __constant_cpu_to_le32 (~0xffff); |
| 573 | qh->hw_info2 |= qh->period |
| 574 | ? cpu_to_le32 (1 << uframe) |
| 575 | : __constant_cpu_to_le32 (0xff); |
| 576 | qh->hw_info2 |= c_mask; |
| 577 | } else |
| 578 | ehci_dbg (ehci, "reused qh %p schedule\n", qh); |
| 579 | |
| 580 | /* stuff into the periodic schedule */ |
| 581 | status = qh_link_periodic (ehci, qh); |
| 582 | done: |
| 583 | return status; |
| 584 | } |
| 585 | |
| 586 | static int intr_submit ( |
| 587 | struct ehci_hcd *ehci, |
| 588 | struct usb_host_endpoint *ep, |
| 589 | struct urb *urb, |
| 590 | struct list_head *qtd_list, |
| 591 | int mem_flags |
| 592 | ) { |
| 593 | unsigned epnum; |
| 594 | unsigned long flags; |
| 595 | struct ehci_qh *qh; |
| 596 | int status = 0; |
| 597 | struct list_head empty; |
| 598 | |
| 599 | /* get endpoint and transfer/schedule data */ |
| 600 | epnum = ep->desc.bEndpointAddress; |
| 601 | |
| 602 | spin_lock_irqsave (&ehci->lock, flags); |
| 603 | |
| 604 | /* get qh and force any scheduling errors */ |
| 605 | INIT_LIST_HEAD (&empty); |
| 606 | qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv); |
| 607 | if (qh == NULL) { |
| 608 | status = -ENOMEM; |
| 609 | goto done; |
| 610 | } |
| 611 | if (qh->qh_state == QH_STATE_IDLE) { |
| 612 | if ((status = qh_schedule (ehci, qh)) != 0) |
| 613 | goto done; |
| 614 | } |
| 615 | |
| 616 | /* then queue the urb's tds to the qh */ |
| 617 | qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv); |
| 618 | BUG_ON (qh == NULL); |
| 619 | |
| 620 | /* ... update usbfs periodic stats */ |
| 621 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; |
| 622 | |
| 623 | done: |
| 624 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 625 | if (status) |
| 626 | qtd_list_free (ehci, urb, qtd_list); |
| 627 | |
| 628 | return status; |
| 629 | } |
| 630 | |
| 631 | /*-------------------------------------------------------------------------*/ |
| 632 | |
| 633 | /* ehci_iso_stream ops work with both ITD and SITD */ |
| 634 | |
| 635 | static struct ehci_iso_stream * |
| 636 | iso_stream_alloc (int mem_flags) |
| 637 | { |
| 638 | struct ehci_iso_stream *stream; |
| 639 | |
| 640 | stream = kmalloc(sizeof *stream, mem_flags); |
| 641 | if (likely (stream != NULL)) { |
| 642 | memset (stream, 0, sizeof(*stream)); |
| 643 | INIT_LIST_HEAD(&stream->td_list); |
| 644 | INIT_LIST_HEAD(&stream->free_list); |
| 645 | stream->next_uframe = -1; |
| 646 | stream->refcount = 1; |
| 647 | } |
| 648 | return stream; |
| 649 | } |
| 650 | |
| 651 | static void |
| 652 | iso_stream_init ( |
| 653 | struct ehci_hcd *ehci, |
| 654 | struct ehci_iso_stream *stream, |
| 655 | struct usb_device *dev, |
| 656 | int pipe, |
| 657 | unsigned interval |
| 658 | ) |
| 659 | { |
| 660 | static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f }; |
| 661 | |
| 662 | u32 buf1; |
| 663 | unsigned epnum, maxp; |
| 664 | int is_input; |
| 665 | long bandwidth; |
| 666 | |
| 667 | /* |
| 668 | * this might be a "high bandwidth" highspeed endpoint, |
| 669 | * as encoded in the ep descriptor's wMaxPacketSize field
| 670 | */ |
| 671 | epnum = usb_pipeendpoint (pipe); |
| 672 | is_input = usb_pipein (pipe) ? USB_DIR_IN : 0; |
| 673 | maxp = usb_maxpacket(dev, pipe, !is_input); |
| 674 | if (is_input) { |
| 675 | buf1 = (1 << 11); |
| 676 | } else { |
| 677 | buf1 = 0; |
| 678 | } |
| 679 | |
| 680 | /* knows about ITD vs SITD */ |
| 681 | if (dev->speed == USB_SPEED_HIGH) { |
| 682 | unsigned multi = hb_mult(maxp); |
| 683 | |
| 684 | stream->highspeed = 1; |
| 685 | |
| 686 | maxp = max_packet(maxp); |
| 687 | buf1 |= maxp; |
| 688 | maxp *= multi; |
| 689 | |
| 690 | stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum); |
| 691 | stream->buf1 = cpu_to_le32 (buf1); |
| 692 | stream->buf2 = cpu_to_le32 (multi); |
| 693 | |
| 694 | /* usbfs wants to report the average usecs per frame tied up |
| 695 | * when transfers on this endpoint are scheduled ... |
| 696 | */ |
| 697 | stream->usecs = HS_USECS_ISO (maxp); |
| 698 | bandwidth = stream->usecs * 8; |
| 699 | bandwidth /= 1 << (interval - 1); |
| 700 | |
| 701 | } else { |
| 702 | u32 addr; |
| 703 | |
| 704 | addr = dev->ttport << 24; |
| 705 | if (!ehci_is_TDI(ehci) |
| 706 | || (dev->tt->hub != |
| 707 | ehci_to_hcd(ehci)->self.root_hub)) |
| 708 | addr |= dev->tt->hub->devnum << 16; |
| 709 | addr |= epnum << 8; |
| 710 | addr |= dev->devnum; |
| 711 | stream->usecs = HS_USECS_ISO (maxp); |
| 712 | if (is_input) { |
| 713 | u32 tmp; |
| 714 | |
| 715 | addr |= 1 << 31; |
| 716 | stream->c_usecs = stream->usecs; |
| 717 | stream->usecs = HS_USECS_ISO (1); |
| 718 | stream->raw_mask = 1; |
| 719 | |
| 720 | /* pessimistic c-mask */ |
| 721 | tmp = usb_calc_bus_time (USB_SPEED_FULL, 1, 0, maxp) |
| 722 | / (125 * 1000); |
| 723 | stream->raw_mask |= 3 << (tmp + 9); |
| 724 | } else |
| 725 | stream->raw_mask = smask_out [maxp / 188]; |
| 726 | bandwidth = stream->usecs + stream->c_usecs; |
| 727 | bandwidth /= 1 << (interval + 2); |
| 728 | |
| 729 | /* stream->splits gets created from raw_mask later */ |
| 730 | stream->address = cpu_to_le32 (addr); |
| 731 | } |
| 732 | stream->bandwidth = bandwidth; |
| 733 | |
| 734 | stream->udev = dev; |
| 735 | |
| 736 | stream->bEndpointAddress = is_input | epnum; |
| 737 | stream->interval = interval; |
| 738 | stream->maxp = maxp; |
| 739 | } |
| 740 | |
| 741 | static void |
| 742 | iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream) |
| 743 | { |
| 744 | stream->refcount--; |
| 745 | |
| 746 | /* free whenever just a dev->ep reference remains. |
| 747 | * not like a QH -- no persistent state (toggle, halt) |
| 748 | */ |
| 749 | if (stream->refcount == 1) { |
| 750 | int is_in; |
| 751 | |
| 752 | // BUG_ON (!list_empty(&stream->td_list)); |
| 753 | |
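/* return any cached iTDs/siTDs to their DMA pools */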
| 754 | while (!list_empty (&stream->free_list)) { |
| 755 | struct list_head *entry; |
| 756 | |
| 757 | entry = stream->free_list.next; |
| 758 | list_del (entry); |
| 759 | |
| 760 | /* knows about ITD vs SITD */ |
| 761 | if (stream->highspeed) { |
| 762 | struct ehci_itd *itd; |
| 763 | |
| 764 | itd = list_entry (entry, struct ehci_itd, |
| 765 | itd_list); |
| 766 | dma_pool_free (ehci->itd_pool, itd, |
| 767 | itd->itd_dma); |
| 768 | } else { |
| 769 | struct ehci_sitd *sitd; |
| 770 | |
| 771 | sitd = list_entry (entry, struct ehci_sitd, |
| 772 | sitd_list); |
| 773 | dma_pool_free (ehci->sitd_pool, sitd, |
| 774 | sitd->sitd_dma); |
| 775 | } |
| 776 | } |
| 777 | |
| 778 | is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0; |
| 779 | stream->bEndpointAddress &= 0x0f; |
| 780 | stream->ep->hcpriv = NULL; |
| 781 | |
| 782 | if (stream->rescheduled) { |
| 783 | ehci_info (ehci, "ep%d%s-iso rescheduled " |
| 784 | "%lu times in %lu seconds\n", |
| 785 | stream->bEndpointAddress, is_in ? "in" : "out", |
| 786 | stream->rescheduled, |
| 787 | ((jiffies - stream->start)/HZ) |
| 788 | ); |
| 789 | } |
| 790 | |
| 791 | kfree(stream); |
| 792 | } |
| 793 | } |
| 794 | |
| 795 | static inline struct ehci_iso_stream * |
| 796 | iso_stream_get (struct ehci_iso_stream *stream) |
| 797 | { |
| 798 | if (likely (stream != NULL)) |
| 799 | stream->refcount++; |
| 800 | return stream; |
| 801 | } |
| 802 | |
| 803 | static struct ehci_iso_stream * |
| 804 | iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) |
| 805 | { |
| 806 | unsigned epnum; |
| 807 | struct ehci_iso_stream *stream; |
| 808 | struct usb_host_endpoint *ep; |
| 809 | unsigned long flags; |
| 810 | |
| 811 | epnum = usb_pipeendpoint (urb->pipe); |
| 812 | if (usb_pipein(urb->pipe)) |
| 813 | ep = urb->dev->ep_in[epnum]; |
| 814 | else |
| 815 | ep = urb->dev->ep_out[epnum]; |
| 816 | |
| 817 | spin_lock_irqsave (&ehci->lock, flags); |
| 818 | stream = ep->hcpriv; |
| 819 | |
| 820 | if (unlikely (stream == NULL)) { |
| 821 | stream = iso_stream_alloc(GFP_ATOMIC); |
| 822 | if (likely (stream != NULL)) { |
| 823 | /* dev->ep owns the initial refcount */ |
| 824 | ep->hcpriv = stream; |
| 825 | stream->ep = ep; |
| 826 | iso_stream_init(ehci, stream, urb->dev, urb->pipe, |
| 827 | urb->interval); |
| 828 | } |
| 829 | |
| 830 | /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */ |
| 831 | } else if (unlikely (stream->hw_info1 != 0)) { |
| 832 | ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", |
| 833 | urb->dev->devpath, epnum, |
| 834 | usb_pipein(urb->pipe) ? "in" : "out"); |
| 835 | stream = NULL; |
| 836 | } |
| 837 | |
| 838 | /* caller guarantees an eventual matching iso_stream_put */ |
| 839 | stream = iso_stream_get (stream); |
| 840 | |
| 841 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 842 | return stream; |
| 843 | } |
| 844 | |
| 845 | /*-------------------------------------------------------------------------*/ |
| 846 | |
| 847 | /* ehci_iso_sched ops can be ITD-only or SITD-only */ |
| 848 | |
| 849 | static struct ehci_iso_sched * |
| 850 | iso_sched_alloc (unsigned packets, int mem_flags) |
| 851 | { |
| 852 | struct ehci_iso_sched *iso_sched; |
| 853 | int size = sizeof *iso_sched; |
| 854 | |
| 855 | size += packets * sizeof (struct ehci_iso_packet); |
| 856 | iso_sched = kmalloc (size, mem_flags); |
| 857 | if (likely (iso_sched != NULL)) { |
| 858 | memset(iso_sched, 0, size); |
| 859 | INIT_LIST_HEAD (&iso_sched->td_list); |
| 860 | } |
| 861 | return iso_sched; |
| 862 | } |
| 863 | |
| 864 | static inline void |
| 865 | itd_sched_init ( |
| 866 | struct ehci_iso_sched *iso_sched, |
| 867 | struct ehci_iso_stream *stream, |
| 868 | struct urb *urb |
| 869 | ) |
| 870 | { |
| 871 | unsigned i; |
| 872 | dma_addr_t dma = urb->transfer_dma; |
| 873 | |
| 874 | /* how many uframes are needed for these transfers */ |
| 875 | iso_sched->span = urb->number_of_packets * stream->interval; |
| 876 | |
| 877 | /* figure out per-uframe itd fields that we'll need later |
| 878 | * when we fit new itds into the schedule. |
| 879 | */ |
| 880 | for (i = 0; i < urb->number_of_packets; i++) { |
| 881 | struct ehci_iso_packet *uframe = &iso_sched->packet [i]; |
| 882 | unsigned length; |
| 883 | dma_addr_t buf; |
| 884 | u32 trans; |
| 885 | |
| 886 | length = urb->iso_frame_desc [i].length; |
| 887 | buf = dma + urb->iso_frame_desc [i].offset; |
| 888 | |
| 889 | trans = EHCI_ISOC_ACTIVE; |
| 890 | trans |= buf & 0x0fff; |
| 891 | if (unlikely (((i + 1) == urb->number_of_packets)) |
| 892 | && !(urb->transfer_flags & URB_NO_INTERRUPT)) |
| 893 | trans |= EHCI_ITD_IOC; |
| 894 | trans |= length << 16; |
| 895 | uframe->transaction = cpu_to_le32 (trans); |
| 896 | |
| 897 | /* might need to cross a buffer page within a td */ |
| 898 | uframe->bufp = (buf & ~(u64)0x0fff); |
| 899 | buf += length; |
| 900 | if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) |
| 901 | uframe->cross = 1; |
| 902 | } |
| 903 | } |
| 904 | |
| 905 | static void |
| 906 | iso_sched_free ( |
| 907 | struct ehci_iso_stream *stream, |
| 908 | struct ehci_iso_sched *iso_sched |
| 909 | ) |
| 910 | { |
| 911 | if (!iso_sched) |
| 912 | return; |
| 913 | // caller must hold ehci->lock! |
| 914 | list_splice (&iso_sched->td_list, &stream->free_list); |
| 915 | kfree (iso_sched); |
| 916 | } |
| 917 | |
| 918 | static int |
| 919 | itd_urb_transaction ( |
| 920 | struct ehci_iso_stream *stream, |
| 921 | struct ehci_hcd *ehci, |
| 922 | struct urb *urb, |
| 923 | int mem_flags |
| 924 | ) |
| 925 | { |
| 926 | struct ehci_itd *itd; |
| 927 | dma_addr_t itd_dma; |
| 928 | int i; |
| 929 | unsigned num_itds; |
| 930 | struct ehci_iso_sched *sched; |
| 931 | unsigned long flags; |
| 932 | |
| 933 | sched = iso_sched_alloc (urb->number_of_packets, mem_flags); |
| 934 | if (unlikely (sched == NULL)) |
| 935 | return -ENOMEM; |
| 936 | |
| 937 | itd_sched_init (sched, stream, urb); |
| 938 | |
| 939 | if (urb->interval < 8) |
| 940 | num_itds = 1 + (sched->span + 7) / 8; |
| 941 | else |
| 942 | num_itds = urb->number_of_packets; |
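/* (one iTD covers a whole frame: short intervals pack several packets
 * into each iTD, intervals of a frame or more need one iTD per packet)
 */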
| 943 | |
| 944 | /* allocate/init ITDs */ |
| 945 | spin_lock_irqsave (&ehci->lock, flags); |
| 946 | for (i = 0; i < num_itds; i++) { |
| 947 | |
| 948 | /* free_list.next might be cache-hot ... but maybe |
| 949 | * the HC caches it too. avoid that issue for now. |
| 950 | */ |
| 951 | |
| 952 | /* prefer previously-allocated itds */ |
| 953 | if (likely (!list_empty(&stream->free_list))) { |
| 954 | itd = list_entry (stream->free_list.prev, |
| 955 | struct ehci_itd, itd_list); |
| 956 | list_del (&itd->itd_list); |
| 957 | itd_dma = itd->itd_dma; |
| 958 | } else |
| 959 | itd = NULL; |
| 960 | |
| 961 | if (!itd) { |
| 962 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 963 | itd = dma_pool_alloc (ehci->itd_pool, mem_flags, |
| 964 | &itd_dma); |
| 965 | spin_lock_irqsave (&ehci->lock, flags); |
| 966 | } |
| 967 | |
| 968 | if (unlikely (NULL == itd)) { |
| 969 | iso_sched_free (stream, sched); |
| 970 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 971 | return -ENOMEM; |
| 972 | } |
| 973 | memset (itd, 0, sizeof *itd); |
| 974 | itd->itd_dma = itd_dma; |
| 975 | list_add (&itd->itd_list, &sched->td_list); |
| 976 | } |
| 977 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 978 | |
| 979 | /* temporarily store schedule info in hcpriv */ |
| 980 | urb->hcpriv = sched; |
| 981 | urb->error_count = 0; |
| 982 | return 0; |
| 983 | } |
| 984 | |
| 985 | /*-------------------------------------------------------------------------*/ |
| 986 | |
| 987 | static inline int |
| 988 | itd_slot_ok ( |
| 989 | struct ehci_hcd *ehci, |
| 990 | u32 mod, |
| 991 | u32 uframe, |
| 992 | u8 usecs, |
| 993 | u32 period |
| 994 | ) |
| 995 | { |
| 996 | uframe %= period; |
| 997 | do { |
| 998 | /* can't commit more than 80% periodic == 100 usec */ |
| 999 | if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7) |
| 1000 | > (100 - usecs)) |
| 1001 | return 0; |
| 1002 | |
| 1003 | /* we know urb->interval is 2^N uframes */ |
| 1004 | uframe += period; |
| 1005 | } while (uframe < mod); |
| 1006 | return 1; |
| 1007 | } |
| 1008 | |
| 1009 | static inline int |
| 1010 | sitd_slot_ok ( |
| 1011 | struct ehci_hcd *ehci, |
| 1012 | u32 mod, |
| 1013 | struct ehci_iso_stream *stream, |
| 1014 | u32 uframe, |
| 1015 | struct ehci_iso_sched *sched, |
| 1016 | u32 period_uframes |
| 1017 | ) |
| 1018 | { |
| 1019 | u32 mask, tmp; |
| 1020 | u32 frame, uf; |
| 1021 | |
| 1022 | mask = stream->raw_mask << (uframe & 7); |
| 1023 | |
| 1024 | /* for IN, don't wrap CSPLIT into the next frame */ |
| 1025 | if (mask & ~0xffff) |
| 1026 | return 0; |
| 1027 | |
| 1028 | /* this multi-pass logic is simple, but performance may |
| 1029 | * suffer when the schedule data isn't cached. |
| 1030 | */ |
| 1031 | |
| 1032 | /* check bandwidth */ |
| 1033 | uframe %= period_uframes; |
| 1034 | do { |
| 1035 | u32 max_used; |
| 1036 | |
| 1037 | frame = uframe >> 3; |
| 1038 | uf = uframe & 7; |
| 1039 | |
| 1040 | /* tt must be idle for start(s), any gap, and csplit. |
| 1041 | * assume scheduling slop leaves 10+% for control/bulk. |
| 1042 | */ |
| 1043 | if (!tt_no_collision (ehci, period_uframes << 3, |
| 1044 | stream->udev, frame, mask)) |
| 1045 | return 0; |
| 1046 | |
| 1047 | /* check starts (OUT uses more than one) */ |
| 1048 | max_used = 100 - stream->usecs; |
| 1049 | for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) { |
| 1050 | if (periodic_usecs (ehci, frame, uf) > max_used) |
| 1051 | return 0; |
| 1052 | } |
| 1053 | |
| 1054 | /* for IN, check CSPLIT */ |
| 1055 | if (stream->c_usecs) { |
| 1056 | max_used = 100 - stream->c_usecs; |
| 1057 | do { |
| 1058 | tmp = 1 << uf; |
| 1059 | tmp <<= 8; |
| 1060 | if ((stream->raw_mask & tmp) == 0) |
| 1061 | continue; |
| 1062 | if (periodic_usecs (ehci, frame, uf) |
| 1063 | > max_used) |
| 1064 | return 0; |
| 1065 | } while (++uf < 8); |
| 1066 | } |
| 1067 | |
| 1068 | /* we know urb->interval is 2^N uframes */ |
| 1069 | uframe += period_uframes; |
| 1070 | } while (uframe < mod); |
| 1071 | |
| 1072 | stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7)); |
| 1073 | return 1; |
| 1074 | } |
| 1075 | |
| 1076 | /* |
| 1077 | * This scheduler plans almost as far into the future as it has actual |
| 1078 | * periodic schedule slots. (Affected by TUNE_FLS, which defaults to |
| 1079 | * "as small as possible" to be cache-friendlier.) That limits the size |
| 1080 | * transfers you can stream reliably; avoid more than 64 msec per urb. |
| 1081 | * Also avoid queue depths of less than ehci's worst irq latency (affected |
| 1082 | * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, |
| 1083 | * and other factors); or more than about 230 msec total (for portability, |
| 1084 | * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! |
| 1085 | */ |
| 1086 | |
| 1087 | #define SCHEDULE_SLOP 10 /* frames */ |
| 1088 | |
| 1089 | static int |
| 1090 | iso_stream_schedule ( |
| 1091 | struct ehci_hcd *ehci, |
| 1092 | struct urb *urb, |
| 1093 | struct ehci_iso_stream *stream |
| 1094 | ) |
| 1095 | { |
| 1096 | u32 now, start, max, period; |
| 1097 | int status; |
| 1098 | unsigned mod = ehci->periodic_size << 3; |
| 1099 | struct ehci_iso_sched *sched = urb->hcpriv; |
| 1100 | |
| 1101 | if (sched->span > (mod - 8 * SCHEDULE_SLOP)) { |
| 1102 | ehci_dbg (ehci, "iso request %p too long\n", urb); |
| 1103 | status = -EFBIG; |
| 1104 | goto fail; |
| 1105 | } |
| 1106 | |
| 1107 | if ((stream->depth + sched->span) > mod) { |
| 1108 | ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n", |
| 1109 | urb, stream->depth, sched->span, mod); |
| 1110 | status = -EFBIG; |
| 1111 | goto fail; |
| 1112 | } |
| 1113 | |
| 1114 | now = readl (&ehci->regs->frame_index) % mod; |
| 1115 | |
| 1116 | /* when's the last uframe this urb could start? */ |
| 1117 | max = now + mod; |
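/* (the periodic list wraps every "mod" uframes, so nothing can be
 * scheduled further out than one full pass of that list)
 */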
| 1118 | |
| 1119 | /* typical case: reuse current schedule. stream is still active, |
| 1120 | * and no gaps from host falling behind (irq delays etc) |
| 1121 | */ |
| 1122 | if (likely (!list_empty (&stream->td_list))) { |
| 1123 | start = stream->next_uframe; |
| 1124 | if (start < now) |
| 1125 | start += mod; |
| 1126 | if (likely ((start + sched->span) < max)) |
| 1127 | goto ready; |
| 1128 | /* else fell behind; someday, try to reschedule */ |
| 1129 | status = -EL2NSYNC; |
| 1130 | goto fail; |
| 1131 | } |
| 1132 | |
| 1133 | /* need to schedule; when's the next (u)frame we could start? |
| 1134 | * this is bigger than ehci->i_thresh allows; scheduling itself |
| 1135 | * isn't free, the slop should handle reasonably slow cpus. it |
| 1136 | * can also help high bandwidth if the dma and irq loads don't |
| 1137 | * jump until after the queue is primed. |
| 1138 | */ |
| 1139 | start = SCHEDULE_SLOP * 8 + (now & ~0x07); |
| 1140 | start %= mod; |
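/* i.e. round "now" down to a frame boundary, then leave a slop of
 * SCHEDULE_SLOP frames before the first candidate slot
 */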
| 1141 | stream->next_uframe = start; |
| 1142 | |
| 1143 | /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ |
| 1144 | |
| 1145 | period = urb->interval; |
| 1146 | if (!stream->highspeed) |
| 1147 | period <<= 3; |
| 1148 | |
| 1149 | /* find a uframe slot with enough bandwidth */ |
| 1150 | for (; start < (stream->next_uframe + period); start++) { |
| 1151 | int enough_space; |
| 1152 | |
| 1153 | /* check schedule: enough space? */ |
| 1154 | if (stream->highspeed) |
| 1155 | enough_space = itd_slot_ok (ehci, mod, start, |
| 1156 | stream->usecs, period); |
| 1157 | else { |
| 1158 | if ((start % 8) >= 6) |
| 1159 | continue; |
| 1160 | enough_space = sitd_slot_ok (ehci, mod, stream, |
| 1161 | start, sched, period); |
| 1162 | } |
| 1163 | |
| 1164 | /* schedule it here if there's enough bandwidth */ |
| 1165 | if (enough_space) { |
| 1166 | stream->next_uframe = start % mod; |
| 1167 | goto ready; |
| 1168 | } |
| 1169 | } |
| 1170 | |
| 1171 | /* no room in the schedule */ |
| 1172 | ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n", |
| 1173 | list_empty (&stream->td_list) ? "" : "re", |
| 1174 | urb, now, max); |
| 1175 | status = -ENOSPC; |
| 1176 | |
| 1177 | fail: |
| 1178 | iso_sched_free (stream, sched); |
| 1179 | urb->hcpriv = NULL; |
| 1180 | return status; |
| 1181 | |
| 1182 | ready: |
| 1183 | /* report high speed start in uframes; full speed, in frames */ |
| 1184 | urb->start_frame = stream->next_uframe; |
| 1185 | if (!stream->highspeed) |
| 1186 | urb->start_frame >>= 3; |
| 1187 | return 0; |
| 1188 | } |
| 1189 | |
| 1190 | /*-------------------------------------------------------------------------*/ |
| 1191 | |
| 1192 | static inline void |
| 1193 | itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd) |
| 1194 | { |
| 1195 | int i; |
| 1196 | |
| 1197 | itd->hw_next = EHCI_LIST_END; |
| 1198 | itd->hw_bufp [0] = stream->buf0; |
| 1199 | itd->hw_bufp [1] = stream->buf1; |
| 1200 | itd->hw_bufp [2] = stream->buf2; |
| 1201 | |
| 1202 | for (i = 0; i < 8; i++) |
| 1203 | itd->index[i] = -1; |
| 1204 | |
| 1205 | /* All other fields are filled when scheduling */ |
| 1206 | } |
| 1207 | |
| 1208 | static inline void |
| 1209 | itd_patch ( |
| 1210 | struct ehci_itd *itd, |
| 1211 | struct ehci_iso_sched *iso_sched, |
| 1212 | unsigned index, |
| 1213 | u16 uframe, |
| 1214 | int first |
| 1215 | ) |
| 1216 | { |
| 1217 | struct ehci_iso_packet *uf = &iso_sched->packet [index]; |
| 1218 | unsigned pg = itd->pg; |
| 1219 | |
| 1220 | // BUG_ON (pg == 6 && uf->cross); |
| 1221 | |
| 1222 | uframe &= 0x07; |
| 1223 | itd->index [uframe] = index; |
| 1224 | |
| 1225 | itd->hw_transaction [uframe] = uf->transaction; |
| 1226 | itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12); |
| 1227 | itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0); |
| 1228 | itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32)); |
| 1229 | |
| 1230 | /* iso_frame_desc[].offset must be strictly increasing */ |
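/* when a packet's buffer crosses a 4K page, advance to the next
 * buffer-page register so later packets in this iTD use it
 */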
| 1231 | if (unlikely (!first && uf->cross)) { |
| 1232 | u64 bufp = uf->bufp + 4096; |
| 1233 | itd->pg = ++pg; |
| 1234 | itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0); |
| 1235 | itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32)); |
| 1236 | } |
| 1237 | } |
| 1238 | |
| 1239 | static inline void |
| 1240 | itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) |
| 1241 | { |
| 1242 | /* always prepend ITD/SITD ... only QH tree is order-sensitive */ |
| 1243 | itd->itd_next = ehci->pshadow [frame]; |
| 1244 | itd->hw_next = ehci->periodic [frame]; |
| 1245 | ehci->pshadow [frame].itd = itd; |
| 1246 | itd->frame = frame; |
| 1247 | wmb (); |
| 1248 | ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD; |
| 1249 | } |
| 1250 | |
| 1251 | /* fit urb's itds into the selected schedule slot; activate as needed */ |
| 1252 | static int |
| 1253 | itd_link_urb ( |
| 1254 | struct ehci_hcd *ehci, |
| 1255 | struct urb *urb, |
| 1256 | unsigned mod, |
| 1257 | struct ehci_iso_stream *stream |
| 1258 | ) |
| 1259 | { |
| 1260 | int packet, first = 1; |
| 1261 | unsigned next_uframe, uframe, frame; |
| 1262 | struct ehci_iso_sched *iso_sched = urb->hcpriv; |
| 1263 | struct ehci_itd *itd; |
| 1264 | |
| 1265 | next_uframe = stream->next_uframe % mod; |
| 1266 | |
| 1267 | if (unlikely (list_empty(&stream->td_list))) { |
| 1268 | ehci_to_hcd(ehci)->self.bandwidth_allocated |
| 1269 | += stream->bandwidth; |
| 1270 | ehci_vdbg (ehci, |
| 1271 | "schedule devp %s ep%d%s-iso period %d start %d.%d\n", |
| 1272 | urb->dev->devpath, stream->bEndpointAddress & 0x0f, |
| 1273 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", |
| 1274 | urb->interval, |
| 1275 | next_uframe >> 3, next_uframe & 0x7); |
| 1276 | stream->start = jiffies; |
| 1277 | } |
| 1278 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; |
| 1279 | |
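/* each iTD spans one frame; uframe slots are filled until the next
 * packet falls into a different frame, then the iTD is linked
 */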
| 1280 | /* fill iTDs uframe by uframe */ |
| 1281 | for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) { |
| 1282 | if (itd == NULL) { |
| 1283 | /* ASSERT: we have all necessary itds */ |
| 1284 | // BUG_ON (list_empty (&iso_sched->td_list)); |
| 1285 | |
| 1286 | /* ASSERT: no itds for this endpoint in this uframe */ |
| 1287 | |
| 1288 | itd = list_entry (iso_sched->td_list.next, |
| 1289 | struct ehci_itd, itd_list); |
| 1290 | list_move_tail (&itd->itd_list, &stream->td_list); |
| 1291 | itd->stream = iso_stream_get (stream); |
| 1292 | itd->urb = usb_get_urb (urb); |
| 1293 | first = 1; |
| 1294 | itd_init (stream, itd); |
| 1295 | } |
| 1296 | |
| 1297 | uframe = next_uframe & 0x07; |
| 1298 | frame = next_uframe >> 3; |
| 1299 | |
| 1300 | itd->usecs [uframe] = stream->usecs; |
| 1301 | itd_patch (itd, iso_sched, packet, uframe, first); |
| 1302 | first = 0; |
| 1303 | |
| 1304 | next_uframe += stream->interval; |
| 1305 | stream->depth += stream->interval; |
| 1306 | next_uframe %= mod; |
| 1307 | packet++; |
| 1308 | |
| 1309 | /* link completed itds into the schedule */ |
| 1310 | if (((next_uframe >> 3) != frame) |
| 1311 | || packet == urb->number_of_packets) { |
| 1312 | itd_link (ehci, frame % ehci->periodic_size, itd); |
| 1313 | itd = NULL; |
| 1314 | } |
| 1315 | } |
| 1316 | stream->next_uframe = next_uframe; |
| 1317 | |
| 1318 | /* don't need that schedule data any more */ |
| 1319 | iso_sched_free (stream, iso_sched); |
| 1320 | urb->hcpriv = NULL; |
| 1321 | |
| 1322 | timer_action (ehci, TIMER_IO_WATCHDOG); |
| 1323 | if (unlikely (!ehci->periodic_sched++)) |
| 1324 | return enable_periodic (ehci); |
| 1325 | return 0; |
| 1326 | } |
| 1327 | |
| 1328 | #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) |
| 1329 | |
| 1330 | static unsigned |
| 1331 | itd_complete ( |
| 1332 | struct ehci_hcd *ehci, |
| 1333 | struct ehci_itd *itd, |
| 1334 | struct pt_regs *regs |
| 1335 | ) { |
| 1336 | struct urb *urb = itd->urb; |
| 1337 | struct usb_iso_packet_descriptor *desc; |
| 1338 | u32 t; |
| 1339 | unsigned uframe; |
| 1340 | int urb_index = -1; |
| 1341 | struct ehci_iso_stream *stream = itd->stream; |
| 1342 | struct usb_device *dev; |
| 1343 | |
| 1344 | /* for each uframe with a packet */ |
| 1345 | for (uframe = 0; uframe < 8; uframe++) { |
| 1346 | if (likely (itd->index[uframe] == -1)) |
| 1347 | continue; |
| 1348 | urb_index = itd->index[uframe]; |
| 1349 | desc = &urb->iso_frame_desc [urb_index]; |
| 1350 | |
| 1351 | t = le32_to_cpup (&itd->hw_transaction [uframe]); |
| 1352 | itd->hw_transaction [uframe] = 0; |
| 1353 | stream->depth -= stream->interval; |
| 1354 | |
| 1355 | /* report transfer status */ |
| 1356 | if (unlikely (t & ISO_ERRS)) { |
| 1357 | urb->error_count++; |
| 1358 | if (t & EHCI_ISOC_BUF_ERR) |
| 1359 | desc->status = usb_pipein (urb->pipe) |
| 1360 | ? -ENOSR /* hc couldn't read */ |
| 1361 | : -ECOMM; /* hc couldn't write */ |
| 1362 | else if (t & EHCI_ISOC_BABBLE) |
| 1363 | desc->status = -EOVERFLOW; |
| 1364 | else /* (t & EHCI_ISOC_XACTERR) */ |
| 1365 | desc->status = -EPROTO; |
| 1366 | |
| 1367 | /* HC need not update length with this error */ |
| 1368 | if (!(t & EHCI_ISOC_BABBLE)) |
| 1369 | desc->actual_length = EHCI_ITD_LENGTH (t); |
| 1370 | } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) { |
| 1371 | desc->status = 0; |
| 1372 | desc->actual_length = EHCI_ITD_LENGTH (t); |
| 1373 | } |
| 1374 | } |
| 1375 | |
| 1376 | usb_put_urb (urb); |
| 1377 | itd->urb = NULL; |
| 1378 | itd->stream = NULL; |
| 1379 | list_move (&itd->itd_list, &stream->free_list); |
| 1380 | iso_stream_put (ehci, stream); |
| 1381 | |
| 1382 | /* handle completion now? */ |
| 1383 | if (likely ((urb_index + 1) != urb->number_of_packets)) |
| 1384 | return 0; |
| 1385 | |
| 1386 | /* ASSERT: it's really the last itd for this urb |
| 1387 | list_for_each_entry (itd, &stream->td_list, itd_list) |
| 1388 | BUG_ON (itd->urb == urb); |
| 1389 | */ |
| 1390 | |
| 1391 | /* give urb back to the driver ... can be out-of-order */ |
| 1392 | dev = usb_get_dev (urb->dev); |
| 1393 | ehci_urb_done (ehci, urb, regs); |
| 1394 | urb = NULL; |
| 1395 | |
| 1396 | /* defer stopping schedule; completion can submit */ |
| 1397 | ehci->periodic_sched--; |
| 1398 | if (unlikely (!ehci->periodic_sched)) |
| 1399 | (void) disable_periodic (ehci); |
| 1400 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; |
| 1401 | |
| 1402 | if (unlikely (list_empty (&stream->td_list))) { |
| 1403 | ehci_to_hcd(ehci)->self.bandwidth_allocated |
| 1404 | -= stream->bandwidth; |
| 1405 | ehci_vdbg (ehci, |
| 1406 | "deschedule devp %s ep%d%s-iso\n", |
| 1407 | dev->devpath, stream->bEndpointAddress & 0x0f, |
| 1408 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); |
| 1409 | } |
| 1410 | iso_stream_put (ehci, stream); |
| 1411 | usb_put_dev (dev); |
| 1412 | |
| 1413 | return 1; |
| 1414 | } |
| 1415 | |
| 1416 | /*-------------------------------------------------------------------------*/ |
| 1417 | |
| 1418 | static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags) |
| 1419 | { |
| 1420 | int status = -EINVAL; |
| 1421 | unsigned long flags; |
| 1422 | struct ehci_iso_stream *stream; |
| 1423 | |
| 1424 | /* Get iso_stream head */ |
| 1425 | stream = iso_stream_find (ehci, urb); |
| 1426 | if (unlikely (stream == NULL)) { |
| 1427 | ehci_dbg (ehci, "can't get iso stream\n"); |
| 1428 | return -ENOMEM; |
| 1429 | } |
| 1430 | if (unlikely (urb->interval != stream->interval)) { |
| 1431 | ehci_dbg (ehci, "can't change iso interval %d --> %d\n", |
| 1432 | stream->interval, urb->interval); |
| 1433 | goto done; |
| 1434 | } |
| 1435 | |
| 1436 | #ifdef EHCI_URB_TRACE |
| 1437 | ehci_dbg (ehci, |
| 1438 | "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n", |
| 1439 | __FUNCTION__, urb->dev->devpath, urb, |
| 1440 | usb_pipeendpoint (urb->pipe), |
| 1441 | usb_pipein (urb->pipe) ? "in" : "out", |
| 1442 | urb->transfer_buffer_length, |
| 1443 | urb->number_of_packets, urb->interval, |
| 1444 | stream); |
| 1445 | #endif |
| 1446 | |
| 1447 | /* allocate ITDs w/o locking anything */ |
| 1448 | status = itd_urb_transaction (stream, ehci, urb, mem_flags); |
| 1449 | if (unlikely (status < 0)) { |
| 1450 | ehci_dbg (ehci, "can't init itds\n"); |
| 1451 | goto done; |
| 1452 | } |
| 1453 | |
| 1454 | /* schedule ... need to lock */ |
| 1455 | spin_lock_irqsave (&ehci->lock, flags); |
| 1456 | status = iso_stream_schedule (ehci, urb, stream); |
| 1457 | if (likely (status == 0)) |
| 1458 | itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); |
| 1459 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 1460 | |
| 1461 | done: |
| 1462 | if (unlikely (status < 0)) |
| 1463 | iso_stream_put (ehci, stream); |
| 1464 | return status; |
| 1465 | } |
| 1466 | |
| 1467 | #ifdef CONFIG_USB_EHCI_SPLIT_ISO |
| 1468 | |
| 1469 | /*-------------------------------------------------------------------------*/ |
| 1470 | |
| 1471 | /* |
| 1472 | * "Split ISO TDs" ... used for USB 1.1 devices going through the |
| 1473 | * TTs in USB 2.0 hubs. These need microframe scheduling. |
| 1474 | */ |
| 1475 | |
| 1476 | static inline void |
| 1477 | sitd_sched_init ( |
| 1478 | struct ehci_iso_sched *iso_sched, |
| 1479 | struct ehci_iso_stream *stream, |
| 1480 | struct urb *urb |
| 1481 | ) |
| 1482 | { |
| 1483 | unsigned i; |
| 1484 | dma_addr_t dma = urb->transfer_dma; |
| 1485 | |
| 1486 | /* how many frames are needed for these transfers */ |
| 1487 | iso_sched->span = urb->number_of_packets * stream->interval; |
| 1488 | |
| 1489 | /* figure out per-frame sitd fields that we'll need later |
| 1490 | * when we fit new sitds into the schedule. |
| 1491 | */ |
| 1492 | for (i = 0; i < urb->number_of_packets; i++) { |
| 1493 | struct ehci_iso_packet *packet = &iso_sched->packet [i]; |
| 1494 | unsigned length; |
| 1495 | dma_addr_t buf; |
| 1496 | u32 trans; |
| 1497 | |
| 1498 | length = urb->iso_frame_desc [i].length & 0x03ff; |
| 1499 | buf = dma + urb->iso_frame_desc [i].offset; |
| 1500 | |
| 1501 | trans = SITD_STS_ACTIVE; |
| 1502 | if (((i + 1) == urb->number_of_packets) |
| 1503 | && !(urb->transfer_flags & URB_NO_INTERRUPT)) |
| 1504 | trans |= SITD_IOC; |
| 1505 | trans |= length << 16; |
| 1506 | packet->transaction = cpu_to_le32 (trans); |
| 1507 | |
| 1508 | /* might need to cross a buffer page within a td */ |
| 1509 | packet->bufp = buf; |
| 1510 | packet->buf1 = (buf + length) & ~0x0fff; |
| 1511 | if (packet->buf1 != (buf & ~(u64)0x0fff)) |
| 1512 | packet->cross = 1; |
| 1513 | |
| 1514 | /* OUT uses multiple start-splits */ |
| 1515 | if (stream->bEndpointAddress & USB_DIR_IN) |
| 1516 | continue; |
| 1517 | length = (length + 187) / 188; |
| 1518 | if (length > 1) /* BEGIN vs ALL */ |
| 1519 | length |= 1 << 3; |
| 1520 | packet->buf1 |= length; |
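/* (buf1's low bits hold the start-split count and the BEGIN/ALL
 * transaction-position flag for this OUT packet)
 */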
| 1521 | } |
| 1522 | } |
| 1523 | |
| 1524 | static int |
| 1525 | sitd_urb_transaction ( |
| 1526 | struct ehci_iso_stream *stream, |
| 1527 | struct ehci_hcd *ehci, |
| 1528 | struct urb *urb, |
| 1529 | int mem_flags |
| 1530 | ) |
| 1531 | { |
| 1532 | struct ehci_sitd *sitd; |
| 1533 | dma_addr_t sitd_dma; |
| 1534 | int i; |
| 1535 | struct ehci_iso_sched *iso_sched; |
| 1536 | unsigned long flags; |
| 1537 | |
| 1538 | iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags); |
| 1539 | if (iso_sched == NULL) |
| 1540 | return -ENOMEM; |
| 1541 | |
| 1542 | sitd_sched_init (iso_sched, stream, urb); |
| 1543 | |
| 1544 | /* allocate/init sITDs */ |
| 1545 | spin_lock_irqsave (&ehci->lock, flags); |
| 1546 | for (i = 0; i < urb->number_of_packets; i++) { |
| 1547 | |
| 1548 | /* NOTE: for now, we don't try to handle wraparound cases |
| 1549 | * for IN (using sitd->hw_backpointer, like a FSTN), which |
| 1550 | * means we never need two sitds for full speed packets. |
| 1551 | */ |
| 1552 | |
| 1553 | /* free_list.next might be cache-hot ... but maybe |
| 1554 | * the HC caches it too. avoid that issue for now. |
| 1555 | */ |
| 1556 | |
| 1557 | /* prefer previously-allocated sitds */ |
| 1558 | if (!list_empty(&stream->free_list)) { |
| 1559 | sitd = list_entry (stream->free_list.prev, |
| 1560 | struct ehci_sitd, sitd_list); |
| 1561 | list_del (&sitd->sitd_list); |
| 1562 | sitd_dma = sitd->sitd_dma; |
| 1563 | } else |
| 1564 | sitd = NULL; |
| 1565 | |
| 1566 | if (!sitd) { |
| 1567 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 1568 | sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags, |
| 1569 | &sitd_dma); |
| 1570 | spin_lock_irqsave (&ehci->lock, flags); |
| 1571 | } |
| 1572 | |
| 1573 | if (!sitd) { |
| 1574 | iso_sched_free (stream, iso_sched); |
| 1575 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 1576 | return -ENOMEM; |
| 1577 | } |
| 1578 | memset (sitd, 0, sizeof *sitd); |
| 1579 | sitd->sitd_dma = sitd_dma; |
| 1580 | list_add (&sitd->sitd_list, &iso_sched->td_list); |
| 1581 | } |
| 1582 | |
| 1583 | /* temporarily store schedule info in hcpriv */ |
| 1584 | urb->hcpriv = iso_sched; |
| 1585 | urb->error_count = 0; |
| 1586 | |
| 1587 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 1588 | return 0; |
| 1589 | } |
| 1590 | |
| 1591 | /*-------------------------------------------------------------------------*/ |
| 1592 | |
| 1593 | static inline void |
| 1594 | sitd_patch ( |
| 1595 | struct ehci_iso_stream *stream, |
| 1596 | struct ehci_sitd *sitd, |
| 1597 | struct ehci_iso_sched *iso_sched, |
| 1598 | unsigned index |
| 1599 | ) |
| 1600 | { |
| 1601 | struct ehci_iso_packet *uf = &iso_sched->packet [index]; |
| 1602 | u64 bufp = uf->bufp; |
| 1603 | |
| 1604 | sitd->hw_next = EHCI_LIST_END; |
| 1605 | sitd->hw_fullspeed_ep = stream->address; |
| 1606 | sitd->hw_uframe = stream->splits; |
| 1607 | sitd->hw_results = uf->transaction; |
| 1608 | sitd->hw_backpointer = EHCI_LIST_END; |
| 1609 | |
| 1610 | bufp = uf->bufp; |
| 1611 | sitd->hw_buf [0] = cpu_to_le32 (bufp); |
| 1612 | sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32); |
| 1613 | |
| 1614 | sitd->hw_buf [1] = cpu_to_le32 (uf->buf1); |
| 1615 | if (uf->cross) |
| 1616 | bufp += 4096; |
| 1617 | sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32); |
| 1618 | sitd->index = index; |
| 1619 | } |
| 1620 | |
| 1621 | static inline void |
| 1622 | sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd) |
| 1623 | { |
| 1624 | /* note: sitd ordering could matter (CSPLIT then SSPLIT) */ |
| 1625 | sitd->sitd_next = ehci->pshadow [frame]; |
| 1626 | sitd->hw_next = ehci->periodic [frame]; |
| 1627 | ehci->pshadow [frame].sitd = sitd; |
| 1628 | sitd->frame = frame; |
| 1629 | wmb (); |
| 1630 | ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD; |
| 1631 | } |
| 1632 | |
| 1633 | /* fit urb's sitds into the selected schedule slot; activate as needed */ |
| 1634 | static int |
| 1635 | sitd_link_urb ( |
| 1636 | struct ehci_hcd *ehci, |
| 1637 | struct urb *urb, |
| 1638 | unsigned mod, |
| 1639 | struct ehci_iso_stream *stream |
| 1640 | ) |
| 1641 | { |
| 1642 | int packet; |
| 1643 | unsigned next_uframe; |
| 1644 | struct ehci_iso_sched *sched = urb->hcpriv; |
| 1645 | struct ehci_sitd *sitd; |
| 1646 | |
| 1647 | next_uframe = stream->next_uframe; |
| 1648 | |
| 1649 | if (list_empty(&stream->td_list)) { |
| 1650 | /* usbfs ignores TT bandwidth */ |
| 1651 | ehci_to_hcd(ehci)->self.bandwidth_allocated |
| 1652 | += stream->bandwidth; |
| 1653 | ehci_vdbg (ehci, |
| 1654 | "sched devp %s ep%d%s-iso [%d] %dms/%04x\n", |
| 1655 | urb->dev->devpath, stream->bEndpointAddress & 0x0f, |
| 1656 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", |
| 1657 | (next_uframe >> 3) % ehci->periodic_size, |
| 1658 | stream->interval, le32_to_cpu (stream->splits)); |
| 1659 | stream->start = jiffies; |
| 1660 | } |
| 1661 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; |
| 1662 | |
| 1663 | /* fill sITDs frame by frame */ |
| 1664 | for (packet = 0, sitd = NULL; |
| 1665 | packet < urb->number_of_packets; |
| 1666 | packet++) { |
| 1667 | |
| 1668 | /* ASSERT: we have all necessary sitds */ |
| 1669 | BUG_ON (list_empty (&sched->td_list)); |
| 1670 | |
| 1671 | /* ASSERT: no itds for this endpoint in this frame */ |
| 1672 | |
| 1673 | sitd = list_entry (sched->td_list.next, |
| 1674 | struct ehci_sitd, sitd_list); |
| 1675 | list_move_tail (&sitd->sitd_list, &stream->td_list); |
| 1676 | sitd->stream = iso_stream_get (stream); |
| 1677 | sitd->urb = usb_get_urb (urb); |
| 1678 | |
| 1679 | sitd_patch (stream, sitd, sched, packet); |
| 1680 | sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, |
| 1681 | sitd); |
| 1682 | |
| 1683 | next_uframe += stream->interval << 3; |
| 1684 | stream->depth += stream->interval << 3; |
| 1685 | } |
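| | /* remember where the next urb on this stream should begin */ |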
| 1686 | stream->next_uframe = next_uframe % mod; |
| 1687 | |
| 1688 | /* don't need that schedule data any more */ |
| 1689 | iso_sched_free (stream, sched); |
| 1690 | urb->hcpriv = NULL; |
| 1691 | |
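| | /* kick the watchdog; the first periodic transaction also enables |
| | * the periodic schedule. |
| | */ |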
| 1692 | timer_action (ehci, TIMER_IO_WATCHDOG); |
| 1693 | if (!ehci->periodic_sched++) |
| 1694 | return enable_periodic (ehci); |
| 1695 | return 0; |
| 1696 | } |
| 1697 | |
| 1698 | /*-------------------------------------------------------------------------*/ |
| 1699 | |
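| | /* any of these status bits marks a failed split transaction */ |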
| 1700 | #define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \ |
| 1701 | | SITD_STS_XACT | SITD_STS_MMF) |
| 1702 | |
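| | /* sitd_complete() records one packet's status, recycles the sITD to |
| | * its stream's free list, and gives the urb back after its last |
| | * packet.  Returns nonzero when an urb was completed, so the caller |
| | * rescans this frame's schedule. |
| | */ |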
| 1703 | static unsigned |
| 1704 | sitd_complete ( |
| 1705 | struct ehci_hcd *ehci, |
| 1706 | struct ehci_sitd *sitd, |
| 1707 | struct pt_regs *regs |
| 1708 | ) { |
| 1709 | struct urb *urb = sitd->urb; |
| 1710 | struct usb_iso_packet_descriptor *desc; |
| 1711 | u32 t; |
| 1712 | int urb_index = -1; |
| 1713 | struct ehci_iso_stream *stream = sitd->stream; |
| 1714 | struct usb_device *dev; |
| 1715 | |
| 1716 | urb_index = sitd->index; |
| 1717 | desc = &urb->iso_frame_desc [urb_index]; |
| 1718 | t = le32_to_cpup (&sitd->hw_results); |
| 1719 | |
| 1720 | /* report transfer status */ |
| 1721 | if (t & SITD_ERRS) { |
| 1722 | urb->error_count++; |
| 1723 | if (t & SITD_STS_DBE) |
| 1724 | desc->status = usb_pipein (urb->pipe) |
| 1725 | ? -ENOSR /* hc couldn't read */ |
| 1726 | : -ECOMM; /* hc couldn't write */ |
| 1727 | else if (t & SITD_STS_BABBLE) |
| 1728 | desc->status = -EOVERFLOW; |
| 1729 | else /* XACT, MMF, etc */ |
| 1730 | desc->status = -EPROTO; |
| 1731 | } else { |
| 1732 | desc->status = 0; |
| 1733 | desc->actual_length = desc->length - SITD_LENGTH (t); |
| 1734 | } |
| 1735 | |
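| | /* drop this sITD's urb/stream references; park it on the free list */ |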
| 1736 | usb_put_urb (urb); |
| 1737 | sitd->urb = NULL; |
| 1738 | sitd->stream = NULL; |
| 1739 | list_move (&sitd->sitd_list, &stream->free_list); |
| 1740 | stream->depth -= stream->interval << 3; |
| 1741 | iso_stream_put (ehci, stream); |
| 1742 | |
| 1743 | /* handle completion now? */ |
| 1744 | if ((urb_index + 1) != urb->number_of_packets) |
| 1745 | return 0; |
| 1746 | |
| 1747 | /* ASSERT: it's really the last sitd for this urb |
| 1748 | list_for_each_entry (sitd, &stream->td_list, sitd_list) |
| 1749 | BUG_ON (sitd->urb == urb); |
| 1750 | */ |
| 1751 | |
| 1752 | /* give urb back to the driver */ |
| 1753 | dev = usb_get_dev (urb->dev); |
| 1754 | ehci_urb_done (ehci, urb, regs); |
| 1755 | urb = NULL; |
| 1756 | |
| 1757 | /* defer stopping schedule; completion can submit */ |
| 1758 | ehci->periodic_sched--; |
| 1759 | if (!ehci->periodic_sched) |
| 1760 | (void) disable_periodic (ehci); |
| 1761 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; |
| 1762 | |
| 1763 | if (list_empty (&stream->td_list)) { |
| 1764 | ehci_to_hcd(ehci)->self.bandwidth_allocated |
| 1765 | -= stream->bandwidth; |
| 1766 | ehci_vdbg (ehci, |
| 1767 | "deschedule devp %s ep%d%s-iso\n", |
| 1768 | dev->devpath, stream->bEndpointAddress & 0x0f, |
| 1769 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); |
| 1770 | } |
| 1771 | iso_stream_put (ehci, stream); |
| 1772 | usb_put_dev (dev); |
| 1773 | |
| 1774 | return 1; |
| 1775 | } |
| 1776 | |
| 1777 | |
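| | /* sitd_submit() queues one iso urb for a full speed device behind a |
| | * transaction translator: find (or build) the stream, allocate its |
| | * sITDs, then schedule and link them while holding ehci->lock. |
| | */ |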
| 1778 | static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags) |
| 1779 | { |
| 1780 | int status = -EINVAL; |
| 1781 | unsigned long flags; |
| 1782 | struct ehci_iso_stream *stream; |
| 1783 | |
| 1784 | /* Get iso_stream head */ |
| 1785 | stream = iso_stream_find (ehci, urb); |
| 1786 | if (stream == NULL) { |
| 1787 | ehci_dbg (ehci, "can't get iso stream\n"); |
| 1788 | return -ENOMEM; |
| 1789 | } |
| 1790 | if (urb->interval != stream->interval) { |
| 1791 | ehci_dbg (ehci, "can't change iso interval %d --> %d\n", |
| 1792 | stream->interval, urb->interval); |
| 1793 | goto done; |
| 1794 | } |
| 1795 | |
| 1796 | #ifdef EHCI_URB_TRACE |
| 1797 | ehci_dbg (ehci, |
| 1798 | "submit %p dev%s ep%d%s-iso len %d\n", |
| 1799 | urb, urb->dev->devpath, |
| 1800 | usb_pipeendpoint (urb->pipe), |
| 1801 | usb_pipein (urb->pipe) ? "in" : "out", |
| 1802 | urb->transfer_buffer_length); |
| 1803 | #endif |
| 1804 | |
| 1805 | /* allocate SITDs */ |
| 1806 | status = sitd_urb_transaction (stream, ehci, urb, mem_flags); |
| 1807 | if (status < 0) { |
| 1808 | ehci_dbg (ehci, "can't init sitds\n"); |
| 1809 | goto done; |
| 1810 | } |
| 1811 | |
| 1812 | /* schedule ... need to lock */ |
| 1813 | spin_lock_irqsave (&ehci->lock, flags); |
| 1814 | status = iso_stream_schedule (ehci, urb, stream); |
| 1815 | if (status == 0) |
| 1816 | sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); |
| 1817 | spin_unlock_irqrestore (&ehci->lock, flags); |
| 1818 | |
| 1819 | done: |
| 1820 | if (status < 0) |
| 1821 | iso_stream_put (ehci, stream); |
| 1822 | return status; |
| 1823 | } |
| 1824 | |
| 1825 | #else |
| 1826 | |
| 1827 | static inline int |
| 1828 | sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags) |
| 1829 | { |
| 1830 | ehci_dbg (ehci, "split iso support is disabled\n"); |
| 1831 | return -ENOSYS; |
| 1832 | } |
| 1833 | |
| 1834 | static inline unsigned |
| 1835 | sitd_complete ( |
| 1836 | struct ehci_hcd *ehci, |
| 1837 | struct ehci_sitd *sitd, |
| 1838 | struct pt_regs *regs |
| 1839 | ) { |
| 1840 | ehci_err (ehci, "sitd_complete %p?\n", sitd); |
| 1841 | return 0; |
| 1842 | } |
| 1843 | |
| 1844 | #endif /* USB_EHCI_SPLIT_ISO */ |
| 1845 | |
| 1846 | /*-------------------------------------------------------------------------*/ |
| 1847 | |
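| | /* scan_periodic() walks the periodic schedule from the last scan |
| | * position up to the frame the controller is working on (or around |
| | * the whole ring once the controller has halted), completing |
| | * interrupt QHs, iTDs, and siTDs.  A frame is rescanned whenever a |
| | * completion may have changed its list. |
| | */ |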
| 1848 | static void |
| 1849 | scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs) |
| 1850 | { |
| 1851 | unsigned frame, clock, now_uframe, mod; |
| 1852 | unsigned modified; |
| 1853 | |
| 1854 | mod = ehci->periodic_size << 3; |
| 1855 | |
| 1856 | /* |
| 1857 | * When the controller is running, scan from the last scan point |
| 1858 | * up to "now"; once it's halted, clean up by scanning everything |
| 1859 | * that's left. Touches as few pages as possible: cache-friendly. |
| 1860 | */ |
| 1861 | now_uframe = ehci->next_uframe; |
| 1862 | if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) |
| 1863 | clock = readl (&ehci->regs->frame_index); |
| 1864 | else |
| 1865 | clock = now_uframe + mod - 1; |
| 1866 | clock %= mod; |
| 1867 | |
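| | /* each pass scans one frame's list, then now_uframe advances until |
| | * it catches up with the controller. |
| | */ |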
| 1868 | for (;;) { |
| 1869 | union ehci_shadow q, *q_p; |
| 1870 | __le32 type, *hw_p; |
| 1871 | unsigned uframes; |
| 1872 | |
| 1873 | /* don't scan past the live uframe */ |
| 1874 | frame = now_uframe >> 3; |
| 1875 | if (frame == (clock >> 3)) |
| 1876 | uframes = now_uframe & 0x07; |
| 1877 | else { |
| 1878 | /* safe to scan the whole frame at once */ |
| 1879 | now_uframe |= 0x07; |
| 1880 | uframes = 8; |
| 1881 | } |
| 1882 | |
| 1883 | restart: |
| 1884 | /* scan each element in frame's queue for completions */ |
| 1885 | q_p = &ehci->pshadow [frame]; |
| 1886 | hw_p = &ehci->periodic [frame]; |
| 1887 | q.ptr = q_p->ptr; |
| 1888 | type = Q_NEXT_TYPE (*hw_p); |
| 1889 | modified = 0; |
| 1890 | |
| 1891 | while (q.ptr != NULL) { |
| 1892 | unsigned uf; |
| 1893 | union ehci_shadow temp; |
| 1894 | int live; |
| 1895 | |
| 1896 | live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state); |
| 1897 | switch (type) { |
| 1898 | case Q_TYPE_QH: |
| 1899 | /* handle any completions */ |
| 1900 | temp.qh = qh_get (q.qh); |
| 1901 | type = Q_NEXT_TYPE (q.qh->hw_next); |
| 1902 | q = q.qh->qh_next; |
| 1903 | modified = qh_completions (ehci, temp.qh, regs); |
| 1904 | if (unlikely (list_empty (&temp.qh->qtd_list))) |
| 1905 | intr_deschedule (ehci, temp.qh); |
| 1906 | qh_put (temp.qh); |
| 1907 | break; |
| 1908 | case Q_TYPE_FSTN: |
| 1909 | /* for "save place" FSTNs, look at QH entries |
| 1910 | * in the previous frame for completions. |
| 1911 | */ |
| 1912 | if (q.fstn->hw_prev != EHCI_LIST_END) { |
| 1913 | dbg ("ignoring completions from FSTNs"); |
| 1914 | } |
| 1915 | type = Q_NEXT_TYPE (q.fstn->hw_next); |
| 1916 | q = q.fstn->fstn_next; |
| 1917 | break; |
| 1918 | case Q_TYPE_ITD: |
| 1919 | /* skip itds for later in the frame */ |
| 1920 | rmb (); |
| 1921 | for (uf = live ? uframes : 8; uf < 8; uf++) { |
| 1922 | if (0 == (q.itd->hw_transaction [uf] |
| 1923 | & ITD_ACTIVE)) |
| 1924 | continue; |
| 1925 | q_p = &q.itd->itd_next; |
| 1926 | hw_p = &q.itd->hw_next; |
| 1927 | type = Q_NEXT_TYPE (q.itd->hw_next); |
| 1928 | q = *q_p; |
| 1929 | break; |
| 1930 | } |
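| | /* itd still active in a later uframe: leave it linked, move on */ |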
| 1931 | if (uf != 8) |
| 1932 | break; |
| 1933 | |
| 1934 | /* this one's ready ... HC won't cache the |
| 1935 | * pointer for much longer, if at all. |
| 1936 | */ |
| 1937 | *q_p = q.itd->itd_next; |
| 1938 | *hw_p = q.itd->hw_next; |
| 1939 | type = Q_NEXT_TYPE (q.itd->hw_next); |
| 1940 | wmb(); |
| 1941 | modified = itd_complete (ehci, q.itd, regs); |
| 1942 | q = *q_p; |
| 1943 | break; |
| 1944 | case Q_TYPE_SITD: |
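| | /* siTDs still marked active are skipped while the HC runs; |
| | * otherwise unlink and complete them. |
| | */ |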
| 1945 | if ((q.sitd->hw_results & SITD_ACTIVE) |
| 1946 | && live) { |
| 1947 | q_p = &q.sitd->sitd_next; |
| 1948 | hw_p = &q.sitd->hw_next; |
| 1949 | type = Q_NEXT_TYPE (q.sitd->hw_next); |
| 1950 | q = *q_p; |
| 1951 | break; |
| 1952 | } |
| 1953 | *q_p = q.sitd->sitd_next; |
| 1954 | *hw_p = q.sitd->hw_next; |
| 1955 | type = Q_NEXT_TYPE (q.sitd->hw_next); |
| 1956 | wmb(); |
| 1957 | modified = sitd_complete (ehci, q.sitd, regs); |
| 1958 | q = *q_p; |
| 1959 | break; |
| 1960 | default: |
| 1961 | dbg ("corrupt type %d frame %d shadow %p", |
| 1962 | type, frame, q.ptr); |
| 1963 | // BUG (); |
| 1964 | q.ptr = NULL; |
| 1965 | } |
| 1966 | |
| 1967 | /* assume completion callbacks modify the queue */ |
| 1968 | if (unlikely (modified)) |
| 1969 | goto restart; |
| 1970 | } |
| 1971 | |
| 1972 | /* stop when we catch up to the HC */ |
| 1973 | |
| 1974 | // FIXME: this assumes we won't get lapped when |
| 1975 | // latencies climb; that should be rare, but... |
| 1976 | // detect it, and just go all the way around. |
| 1977 | // the frame-list rollover (FLR) interrupt might help detect this, |
| 1978 | // so long as latencies don't exceed periodic_size msec (default 1.024 sec). |
| 1979 | |
| 1980 | // FIXME: likewise assumes HC doesn't halt mid-scan |
| 1981 | |
| 1982 | if (now_uframe == clock) { |
| 1983 | unsigned now; |
| 1984 | |
| 1985 | if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) |
| 1986 | break; |
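| | /* record where the next scan should resume */ |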
| 1987 | ehci->next_uframe = now_uframe; |
| 1988 | now = readl (&ehci->regs->frame_index) % mod; |
| 1989 | if (now_uframe == now) |
| 1990 | break; |
| 1991 | |
| 1992 | /* rescan the rest of this frame, then ... */ |
| 1993 | clock = now; |
| 1994 | } else { |
| 1995 | now_uframe++; |
| 1996 | now_uframe %= mod; |
| 1997 | } |
| 1998 | } |
| 1999 | } |