/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD	64
#define NUM_RXCHAN_BD	64
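/* For example: a full-speed bulk endpoint has a 64 byte maxpacket and,
 * in "transparent" mode, each BD maps at most one packet; so 64 BDs
 * cover 64 * 64 = 4096 bytes per channel.  At high speed (512 byte
 * maxpacket) the same 64 BDs would cover 32 KB.
 */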
static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
/*
 * Start DMA controller
 *
 * Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		if (!bd)
			break;	/* allocation failed; run with fewer BDs */
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}
static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}
static int __init cppi_controller_start(struct dma_controller *c)
{
	struct cppi	*controller;
	void __iomem	*tibase;
	int		i;

	controller = container_of(c, struct cppi, controller);

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);

	return 0;
}
/*
 * Stop DMA controller
 *
 * De-init the DMA controller as necessary.
 */

static int cppi_controller_stop(struct dma_controller *c)
{
	struct cppi		*controller;
	void __iomem		*tibase;
	int			i;

	controller = container_of(c, struct cppi, controller);

	tibase = controller->tibase;
	/* disable individual channel interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	DBG(1, "Tearing down RX and TX Channels\n");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* Only the TX side supports a proper teardown handshake, and TX
	 * CPPI cannot be disabled until that teardown completes; so we
	 * disable TX/RX CPPI only after the TX channels are cleaned up.
	 */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);

	return 0;
}
/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
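/* Reading the shift above: RX endpoint 1 maps to bit 9 of the core USB
 * interrupt mask, endpoint 2 to bit 10, and so on.
 */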

/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			DBG(1, "no %cX%d CPPI channel\n", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		DBG(1, "re-allocating DMA%d %cX channel %p\n",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;

	DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT: for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		DBG(1, "releasing idle DMA channel %p\n", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "RX DMA%d%s: %d left, csr %04x, "
			"%08x H%08x S%08x C%08x, "
			"B%08x L%08x %08x .. %08x"
			"\n",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	DBG(level, "TX DMA%d%s: csr %04x, "
			"H%08x S%08x C%08x %08x, "
			"F%08x L%08x .. %08x"
			"\n",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}

/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}
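/* So DAVINCI_RNDIS_REG holds one enable bit per channel: TX channel N
 * in bit N, RX channel N in bit (N + 16).  For example, switching RX
 * channel 2 into rndis mode sets bit 18.
 */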

#ifdef CONFIG_USB_MUSB_DEBUG
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}
#endif

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
#ifdef CONFIG_USB_MUSB_DEBUG
	struct cppi_descriptor	*bd;

	if (!_dbg_level(level))
		return;
	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
#endif
}


/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
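/* As the (disabled) code above implies, DAVINCI_AUTOREQ_REG gives each RX
 * channel a two-bit field at (index * 2): 0x0 means "AutoReq never",
 * 0x1 "all but the last packet", and 0x3 "always".  For RX channel 1
 * that field occupies bits 2-3.
 */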


/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *    more correctly.
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model: the goal will be
 * to append to the existing queue, processing completed "dma buffers"
 * (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave well too
 * (except how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32	partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}
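/* Working through that eligibility test with example transfers
 * (illustrative numbers, not from any particular measurement):
 *
 *  - length 1500, maxpacket 512: 512 is a multiple of 64, 1500 < 0xffff,
 *    and 1500 % 512 != 0, so rndis mode applies: one BD covering all
 *    1500 bytes, one IRQ.
 *
 *  - length 1024, maxpacket 512: an exact multiple, so rndis would hit
 *    the ZLP hiccup described above; transparent mode is used instead,
 *    with n_bds = 2 full-packet BDs.
 */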

/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems: (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both packets
 *   are loaded into the buffer (with a 212 byte gap between them), and the
 *   next buffer queued will NOT get its 300 bytes of data.  (It seems like
 *   SOP/EOP are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
 *   reliably make both cases work, with software handling both cases correctly
 *   and at the significant penalty of needing an IRQ per packet.  (The lack of
 *   I/O overlap can be slightly ameliorated by enabling double buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */

/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a) peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b) and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c) and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing: RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
static bool cppi_rx_rndis = 1;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
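/* To make conditions (c) concrete (illustrative numbers only): a 3072 byte
 * peripheral-side read with 512 byte maxpacket qualifies -- it's under 64KB,
 * 3072 is not a multiple of 4096, and it's an exact 6 packets -- so the
 * whole buffer can go in one rndis-mode BD.  A 4096 byte read would not
 * qualify, since a full-length g_zero style read is then expected.
 *
 * Disabling the heuristic is done on the command line of whichever module
 * this file gets built into, e.g. (assuming the usual musb_hdrc module):
 *
 *	modprobe musb_hdrc cppi_rx_rndis=0
 */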

/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes.  So: multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%x len %u %u/%u\n",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			addr, length, rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

#ifdef CONFIG_USB_MUSB_DEBUG
	if (_dbg_level(5)) {
		struct cppi_descriptor	*d;

		for (d = rx->head; d; d = d->next)
			cppi_dump_rxbd("S", d);
	}
#endif

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		DBG(2, "bufcnt%d underrun - %d (for %d)\n",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}

/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel? %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		DBG(1, "%cX DMA%d not allocated!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
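#if 0
/* Sketch of how the musb glue drives this interface.  The real callers
 * live in musb_host.c/musb_gadget.c; only the dma_controller method names
 * below come from this file, the rest is illustrative.
 */
static void example_start_rx(struct musb *musb, struct musb_hw_ep *hw_ep,
		dma_addr_t buf, u32 len, u16 maxpacket)
{
	struct dma_controller	*c = musb->dma_controller;
	struct dma_channel	*ch;

	/* bind the channel to this endpoint's RX direction once ... */
	ch = c->channel_alloc(c, hw_ep, 0 /* receive */);
	if (!ch)
		return;		/* fall back to PIO */

	/* ... then program one transfer; mode 1 == "onepacket" */
	c->channel_program(ch, maxpacket, 1, buf, len);

	/* completion arrives later via cppi_completion(), after which
	 * the channel may be reprogrammed or channel_release()d.
	 */
}
#endif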

static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE: when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets
			 * may be active in the queue... TI docs didn't say,
			 * but CPPI ignores those BDs even though OWN is
			 * still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racy, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}

void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem		*tibase;
	int			i, index;
	struct cppi		*cppi;
	struct musb_hw_ep	*hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (!bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXPKTRDY ...
					 */
					int	csr;

					csr = musb_readw(hw_ep->regs,
						MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel	*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}

/* Instantiate a software object representing a DMA controller. */
struct dma_controller *__init
dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi	*controller;

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.start = cppi_controller_start;
	controller->controller.stop = cppi_controller_stop;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	return &controller->controller;
}

/*
 * Destroy a previously-instantiated DMA controller.
 */
void dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	/* assert: caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}

/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		int	enabled;

		/* mask interrupts raised to signal teardown complete. */
		enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
				& (1 << cppi_ch->index);
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
					(1 << cppi_ch->index));

		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);
		musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/* re-enable interrupt */
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
					(1 << cppi_ch->index));

		/* While we scrub the TX state RAM, ensure that we clean
		 * up any interrupt that's currently asserted:
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
		 *    (compare mode)
		 * Value written is compared (for bits 31:2) and when
		 * equal, interrupt is deasserted.
		 */
		cppi_reset_tx(tx_ram, 1);
		musb_writel(&tx_ram->tx_complete, 0, 0);

		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16	csr;

		/* NOTE: docs don't guarantee any of this works ... we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}

/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */