/*

  Broadcom BCM43xx wireless driver

  PIO Transmission

  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_pio.h"
#include "bcm43xx_main.h"

#include <linux/delay.h>


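/* 16-bit MMIO accessors for the PIO registers of this queue.
 * All PIO registers live at queue->mmio_base + offset. */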
static inline
u16 bcm43xx_pio_read(struct bcm43xx_pioqueue *queue,
                     u16 offset)
{
        return bcm43xx_read16(queue->bcm, queue->mmio_base + offset);
}

static inline
void bcm43xx_pio_write(struct bcm43xx_pioqueue *queue,
                       u16 offset, u16 value)
{
        bcm43xx_write16(queue->bcm, queue->mmio_base + offset, value);
}

static inline
void tx_start(struct bcm43xx_pioqueue *queue)
{
        bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, BCM43xx_PIO_TXCTL_INIT);
}

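/* Push a single trailing byte. Core revisions < 3 want the data written
 * before the WRITEHI control bit; newer cores want the control bit first. */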
static inline
void tx_octet(struct bcm43xx_pioqueue *queue,
              u8 octet)
{
        if (queue->bcm->current_core->rev < 3) {
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, octet);
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, BCM43xx_PIO_TXCTL_WRITEHI);
        } else {
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, BCM43xx_PIO_TXCTL_WRITEHI);
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, octet);
        }
}

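/* Feed the packet into the TX data register, 16 bits at a time. On core
 * revisions < 3 the first halfword goes out before WRITELO|WRITEHI is set.
 * An odd trailing byte is handled by tx_octet(). */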
static inline
void tx_data(struct bcm43xx_pioqueue *queue,
             u8 *packet,
             unsigned int octets)
{
        u16 data;
        unsigned int i = 0;

        if (queue->bcm->current_core->rev < 3) {
                data = be16_to_cpu( *((u16 *)packet) );
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, data);
                i += 2;
        }
        bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
                          BCM43xx_PIO_TXCTL_WRITELO | BCM43xx_PIO_TXCTL_WRITEHI);
        for ( ; i < octets - 1; i += 2) {
                data = be16_to_cpu( *((u16 *)(packet + i)) );
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, data);
        }
        if (octets % 2)
                tx_octet(queue, packet[octets - 1]);
}

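/* Finish the transfer. Core revisions < 3 must write the last halfword of
 * the frame again, together with the COMPLETE bit. */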
static inline
void tx_complete(struct bcm43xx_pioqueue *queue,
                 struct sk_buff *skb)
{
        u16 data;

        if (queue->bcm->current_core->rev < 3) {
                data = be16_to_cpu( *((u16 *)(skb->data + skb->len - 2)) );
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, data);
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
                                  BCM43xx_PIO_TXCTL_WRITEHI | BCM43xx_PIO_TXCTL_COMPLETE);
        } else {
                bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL, BCM43xx_PIO_TXCTL_COMPLETE);
        }
}

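/* Build the cookie that goes into the device TX header. It comes back in
 * the transmit status and identifies both the PIO queue and the packet
 * cache slot, so the packet can be freed in the xmitstatus handler. */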
static inline
u16 generate_cookie(struct bcm43xx_pioqueue *queue,
                    int packetindex)
{
        u16 cookie = 0x0000;

        /* We use the upper 4 bits for the PIO
         * controller ID and the lower 12 bits
         * for the packet index (in the cache).
         */
        switch (queue->mmio_base) {
        default:
                assert(0);
        case BCM43xx_MMIO_PIO1_BASE:
                break;
        case BCM43xx_MMIO_PIO2_BASE:
                cookie = 0x1000;
                break;
        case BCM43xx_MMIO_PIO3_BASE:
                cookie = 0x2000;
                break;
        case BCM43xx_MMIO_PIO4_BASE:
                cookie = 0x3000;
                break;
        }
        assert(((u16)packetindex & 0xF000) == 0x0000);
        cookie |= (u16)packetindex;

        return cookie;
}

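/* Inverse of generate_cookie(): map a transmit-status cookie back to its
 * queue and to the corresponding entry in the packet cache. */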
static inline
struct bcm43xx_pioqueue * parse_cookie(struct bcm43xx_private *bcm,
                                       u16 cookie,
                                       struct bcm43xx_pio_txpacket **packet)
{
        struct bcm43xx_pioqueue *queue = NULL;
        int packetindex;

        switch (cookie & 0xF000) {
        case 0x0000:
                queue = bcm->current_core->pio->queue0;
                break;
        case 0x1000:
                queue = bcm->current_core->pio->queue1;
                break;
        case 0x2000:
                queue = bcm->current_core->pio->queue2;
                break;
        case 0x3000:
                queue = bcm->current_core->pio->queue3;
                break;
        default:
                assert(0);
        }

        packetindex = (cookie & 0x0FFF);
        assert(packetindex >= 0 && packetindex < BCM43xx_PIO_MAXTXPACKETS);
        *packet = queue->__tx_packets_cache + packetindex;

        return queue;
}

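/* Prepend the device TX header to one fragment and push the whole frame
 * through the PIO data register. */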
static inline
void pio_tx_write_fragment(struct bcm43xx_pioqueue *queue,
                           struct sk_buff *skb,
                           struct bcm43xx_pio_txpacket *packet)
{
        unsigned int octets;

        assert(skb_shinfo(skb)->nr_frags == 0);
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));

        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        bcm43xx_generate_txhdr(queue->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (packet->xmitted_frags == 0),
                               generate_cookie(queue, pio_txpacket_getindex(packet)));

        tx_start(queue);
        octets = skb->len;
        if (queue->bcm->current_core->rev < 3) //FIXME: && this is the last packet in the queue.
                octets -= 2;
        tx_data(queue, (u8 *)skb->data, octets);
        tx_complete(queue, skb);
}

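/* Transmit all not-yet-transmitted fragments of a txb. Returns -EBUSY
 * (without consuming the packet) if the device TX queue is too full, so
 * the TX work handler can retry later. */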
static inline
int pio_tx_packet(struct bcm43xx_pio_txpacket *packet)
{
        struct bcm43xx_pioqueue *queue = packet->queue;
        struct ieee80211_txb *txb = packet->txb;
        struct sk_buff *skb;
        u16 octets;
        int i;

        for (i = packet->xmitted_frags; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];

                octets = (u16)skb->len + sizeof(struct bcm43xx_txhdr);

                assert(queue->tx_devq_size >= octets);
                assert(queue->tx_devq_packets <= BCM43xx_PIO_MAXTXDEVQPACKETS);
                assert(queue->tx_devq_used <= queue->tx_devq_size);
                /* Check if there is sufficient free space on the device
                 * TX queue. If not, return and let the TX-work-handler
                 * retry later.
                 */
                if (queue->tx_devq_packets == BCM43xx_PIO_MAXTXDEVQPACKETS)
                        return -EBUSY;
                if (queue->tx_devq_used + octets > queue->tx_devq_size)
                        return -EBUSY;
                /* Now poke the device. */
                pio_tx_write_fragment(queue, skb, packet);

                /* Account for the packet size.
                 * (We must not overflow the device TX queue)
                 */
                queue->tx_devq_packets++;
                queue->tx_devq_used += octets;

                assert(packet->xmitted_frags <= packet->txb->nr_frags);
                packet->xmitted_frags++;
                packet->xmitted_octets += octets;
        }
        list_move_tail(&packet->list, &queue->txrunning);

        return 0;
}

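/* Give a packet back to the free list and release the device-queue space
 * and packet count it was accounted for. */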
static void free_txpacket(struct bcm43xx_pio_txpacket *packet)
{
        struct bcm43xx_pioqueue *queue = packet->queue;

        ieee80211_txb_free(packet->txb);

        list_move(&packet->list, &packet->queue->txfree);

        assert(queue->tx_devq_used >= packet->xmitted_octets);
        queue->tx_devq_used -= packet->xmitted_octets;
        assert(queue->tx_devq_packets >= packet->xmitted_frags);
        queue->tx_devq_packets -= packet->xmitted_frags;
}

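/* Deferred work: drain queue->txqueue into the hardware. Packets whose
 * fragments can never fit the device queue are dropped; otherwise we stop
 * as soon as the device queue is full and the work gets queued again once
 * transmit status frees up space. */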
static void txwork_handler(void *d)
{
        struct bcm43xx_pioqueue *queue = d;
        unsigned long flags;
        struct bcm43xx_pio_txpacket *packet, *tmp_packet;
        int err;

        spin_lock_irqsave(&queue->txlock, flags);
        list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
                assert(packet->xmitted_frags < packet->txb->nr_frags);
                if (packet->xmitted_frags == 0) {
                        int i;
                        struct sk_buff *skb;

                        /* Check if the device queue is big
                         * enough for every fragment. If not, drop the
                         * whole packet.
                         */
                        for (i = 0; i < packet->txb->nr_frags; i++) {
                                skb = packet->txb->fragments[i];
                                if (unlikely(skb->len > queue->tx_devq_size)) {
                                        dprintkl(KERN_ERR PFX "PIO TX device queue too small. "
                                                              "Dropping packet...\n");
                                        free_txpacket(packet);
                                        goto next_packet;
                                }
                        }
                }
                /* Now try to transmit the packet.
                 * This may not completely succeed.
                 */
                err = pio_tx_packet(packet);
                if (err)
                        break;
        next_packet:
                continue;
        }
        spin_unlock_irqrestore(&queue->txlock, flags);
}

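/* Put every entry of the static TX packet cache onto the free list. */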
static void setup_txqueues(struct bcm43xx_pioqueue *queue)
{
        struct bcm43xx_pio_txpacket *packet;
        int i;

        for (i = 0; i < BCM43xx_PIO_MAXTXPACKETS; i++) {
                packet = queue->__tx_packets_cache + i;

                packet->queue = queue;
                INIT_LIST_HEAD(&packet->list);

                list_add(&packet->list, &queue->txfree);
        }
}

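/* Allocate and initialize one PIO queue. The device TX buffer size is read
 * from the hardware and BCM43xx_PIO_TXQADJUST bytes are reserved from it. */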
static
struct bcm43xx_pioqueue * bcm43xx_setup_pioqueue(struct bcm43xx_private *bcm,
                                                 u16 pio_mmio_base)
{
        struct bcm43xx_pioqueue *queue;
        u32 value;
        u16 qsize;

        queue = kmalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                goto out;
        memset(queue, 0, sizeof(*queue));

        queue->bcm = bcm;
        queue->mmio_base = pio_mmio_base;

        INIT_LIST_HEAD(&queue->txfree);
        INIT_LIST_HEAD(&queue->txqueue);
        INIT_LIST_HEAD(&queue->txrunning);
        spin_lock_init(&queue->txlock);
        INIT_WORK(&queue->txwork, txwork_handler, queue);

        value = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
        value |= BCM43xx_SBF_XFER_REG_BYTESWAP;
        bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value);

        qsize = bcm43xx_read16(bcm, queue->mmio_base + BCM43xx_PIO_TXQBUFSIZE);
        if (qsize <= BCM43xx_PIO_TXQADJUST) {
                printk(KERN_ERR PFX "PIO tx queue too small (%u)\n", qsize);
                goto err_freequeue;
        }
        qsize -= BCM43xx_PIO_TXQADJUST;
        queue->tx_devq_size = qsize;

        setup_txqueues(queue);

out:
        return queue;

err_freequeue:
        kfree(queue);
        queue = NULL;
        goto out;
}

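/* Stop the TX work and drop every packet that is still queued or in flight.
 * Only called while the device is shutting down. */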
static void cancel_transfers(struct bcm43xx_pioqueue *queue)
{
        struct bcm43xx_pio_txpacket *packet, *tmp_packet;

        netif_tx_disable(queue->bcm->net_dev);
        assert(queue->bcm->shutting_down);
        cancel_delayed_work(&queue->txwork);
        flush_workqueue(queue->bcm->workqueue);

        list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
                free_txpacket(packet);
        list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
                free_txpacket(packet);
}

static void bcm43xx_destroy_pioqueue(struct bcm43xx_pioqueue *queue)
{
        if (!queue)
                return;

        cancel_transfers(queue);
        kfree(queue);
}

void bcm43xx_pio_free(struct bcm43xx_private *bcm)
{
        bcm43xx_destroy_pioqueue(bcm->current_core->pio->queue3);
        bcm->current_core->pio->queue3 = NULL;
        bcm43xx_destroy_pioqueue(bcm->current_core->pio->queue2);
        bcm->current_core->pio->queue2 = NULL;
        bcm43xx_destroy_pioqueue(bcm->current_core->pio->queue1);
        bcm->current_core->pio->queue1 = NULL;
        bcm43xx_destroy_pioqueue(bcm->current_core->pio->queue0);
        bcm->current_core->pio->queue0 = NULL;
}

int bcm43xx_pio_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_pioqueue *queue;
        int err = -ENOMEM;

        queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO1_BASE);
        if (!queue)
                goto out;
        bcm->current_core->pio->queue0 = queue;

        queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO2_BASE);
        if (!queue)
                goto err_destroy0;
        bcm->current_core->pio->queue1 = queue;

        queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO3_BASE);
        if (!queue)
                goto err_destroy1;
        bcm->current_core->pio->queue2 = queue;

        queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO4_BASE);
        if (!queue)
                goto err_destroy2;
        bcm->current_core->pio->queue3 = queue;

        if (bcm->current_core->rev < 3)
                bcm->irq_savedstate |= BCM43xx_IRQ_PIO_WORKAROUND;

        dprintk(KERN_INFO PFX "PIO initialized\n");
        err = 0;
out:
        return err;

err_destroy2:
        bcm43xx_destroy_pioqueue(bcm->current_core->pio->queue2);
        bcm->current_core->pio->queue2 = NULL;
err_destroy1:
        bcm43xx_destroy_pioqueue(bcm->current_core->pio->queue1);
        bcm->current_core->pio->queue1 = NULL;
err_destroy0:
        bcm43xx_destroy_pioqueue(bcm->current_core->pio->queue0);
        bcm->current_core->pio->queue0 = NULL;
        goto out;
}

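/* Hand a txb to the queue: take a packet slot from the free list, put it on
 * txqueue and kick the TX work. Stops the network queue when the last free
 * slot has been used. */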
static inline
int pio_transfer_txb(struct bcm43xx_pioqueue *queue,
                     struct ieee80211_txb *txb)
{
        struct bcm43xx_pio_txpacket *packet;
        unsigned long flags;
        u16 tmp;

        spin_lock_irqsave(&queue->txlock, flags);
        assert(!queue->tx_suspended);
        assert(!list_empty(&queue->txfree));

        tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL);
        if (tmp & BCM43xx_PIO_TXCTL_SUSPEND) {
                spin_unlock_irqrestore(&queue->txlock, flags);
                return -EBUSY;
        }

        packet = list_entry(queue->txfree.next, struct bcm43xx_pio_txpacket, list);

        packet->txb = txb;
        list_move_tail(&packet->list, &queue->txqueue);
        packet->xmitted_octets = 0;
        packet->xmitted_frags = 0;

        /* Suspend TX, if we are out of packets in the "free" queue. */
        if (unlikely(list_empty(&queue->txfree))) {
                netif_stop_queue(queue->bcm->net_dev);
                queue->tx_suspended = 1;
        }

        spin_unlock_irqrestore(&queue->txlock, flags);
        queue_work(queue->bcm->workqueue, &queue->txwork);

        return 0;
}

int fastcall bcm43xx_pio_transfer_txb(struct bcm43xx_private *bcm,
                                      struct ieee80211_txb *txb)
{
        return pio_transfer_txb(bcm->current_core->pio->queue1, txb);
}

void fastcall
bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
                              struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_pioqueue *queue;
        struct bcm43xx_pio_txpacket *packet;
        unsigned long flags;

        queue = parse_cookie(bcm, status->cookie, &packet);
        assert(queue);
        spin_lock_irqsave(&queue->txlock, flags);
        free_txpacket(packet);
        if (unlikely(queue->tx_suspended)) {
                queue->tx_suspended = 0;
                netif_wake_queue(queue->bcm->net_dev);
        }

        /* If there are packets on the txqueue,
         * start the work handler again.
         */
        if (!list_empty(&queue->txqueue)) {
                queue_work(queue->bcm->workqueue,
                           &queue->txwork);
        }
        spin_unlock_irqrestore(&queue->txlock, flags);
}

static void pio_rx_error(struct bcm43xx_pioqueue *queue,
                         const char *error)
{
        printk("PIO RX error: %s\n", error);
        bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL, BCM43xx_PIO_RXCTL_READY);
}

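/* Pull one received frame out of the RX FIFO: wait for the READY bit, read
 * the frame length and the RX header preamble, then copy the payload into a
 * freshly allocated skb and hand it to bcm43xx_rx(). */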
void fastcall
bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue)
{
        u16 preamble[21] = { 0 };
        struct bcm43xx_rxhdr *rxhdr;
        u16 tmp;
        u16 len;
        int i, err;
        int preamble_readwords;
        struct sk_buff *skb;

        tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL);
        if (!(tmp & BCM43xx_PIO_RXCTL_DATAAVAILABLE)) {
                dprintkl(KERN_ERR PFX "PIO RX: No data available\n");
                return;
        }
        bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL, BCM43xx_PIO_RXCTL_DATAAVAILABLE);

        for (i = 0; i < 10; i++) {
                tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL);
                if (tmp & BCM43xx_PIO_RXCTL_READY)
                        goto data_ready;
                udelay(10);
        }
        dprintkl(KERN_ERR PFX "PIO RX timed out\n");
        return;
data_ready:

        len = le16_to_cpu(bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA));
        if (unlikely(len > 0x700)) {
                pio_rx_error(queue, "len > 0x700");
                return;
        }
        if (unlikely(len == 0 && queue->mmio_base != BCM43xx_MMIO_PIO4_BASE)) {
                pio_rx_error(queue, "len == 0");
                return;
        }
        preamble[0] = cpu_to_le16(len);
        if (queue->mmio_base == BCM43xx_MMIO_PIO4_BASE)
                preamble_readwords = 14 / sizeof(u16);
        else
                preamble_readwords = 18 / sizeof(u16);
        for (i = 0; i < preamble_readwords; i++) {
                tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
                preamble[i + 1] = cpu_to_be16(tmp);
        }
        rxhdr = (struct bcm43xx_rxhdr *)preamble;
        if (unlikely(rxhdr->flags2 & BCM43xx_RXHDR_FLAGS2_INVALIDFRAME)) {
                pio_rx_error(queue, "invalid frame");
                if (queue->mmio_base == BCM43xx_MMIO_PIO1_BASE) {
                        for (i = 0; i < 15; i++)
                                bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA); /* dummy read. */
                }
                return;
        }
//FIXME
#if 0
        if (queue->mmio_base == BCM43xx_MMIO_PIO4_BASE) {
                bcm43xx_rx_transmitstatus(queue->bcm,
                                          (const struct bcm43xx_hwxmitstatus *)(preamble + 1));
                return;
        }
#endif
        skb = dev_alloc_skb(len);
        if (unlikely(!skb)) {
                pio_rx_error(queue, "out of memory");
                return;
        }
        skb_put(skb, len);
        for (i = 0; i < len - 1; i += 2) {
                tmp = cpu_to_be16(bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA));
                *((u16 *)(skb->data + i)) = tmp;
        }
        if (len % 2) {
                tmp = cpu_to_be16(bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA));
                skb->data[len - 1] = (tmp & 0x00FF);
                skb->data[0] = (tmp & 0xFF00) >> 8;
        }
        err = bcm43xx_rx(queue->bcm, skb, rxhdr);
        if (unlikely(err))
                dev_kfree_skb_irq(skb);
}

/* vim: set ts=8 sw=8 sts=8: */