/*
 * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
 * Copyright (c) 2014, I2SE GmbH
 *
 * Permission to use, copy, modify, and/or distribute this software
 * for any purpose with or without fee is hereby granted, provided
 * that the above copyright notice and this permission notice appear
 * in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* This module implements the Qualcomm Atheros SPI protocol for a
 * kernel-based SPI device; it is essentially an Ethernet-to-SPI
 * serial converter.
 */

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "qca_7k.h"
#include "qca_debug.h"
#include "qca_framing.h"
#include "qca_spi.h"

#define MAX_DMA_BURST_LEN 5000

/* Module parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
#define QCASPI_CLK_SPEED     8000000
static int qcaspi_clkspeed;
module_param(qcaspi_clkspeed, int, 0);
MODULE_PARM_DESC(qcaspi_clkspeed, "SPI bus clock speed (Hz). Use 1000000-16000000.");

#define QCASPI_BURST_LEN_MIN 1
#define QCASPI_BURST_LEN_MAX MAX_DMA_BURST_LEN
static int qcaspi_burst_len = MAX_DMA_BURST_LEN;
module_param(qcaspi_burst_len, int, 0);
MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000.");

#define QCASPI_PLUGGABLE_MIN 0
#define QCASPI_PLUGGABLE_MAX 1
static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");

#define QCASPI_MTU QCAFRM_ETHMAXMTU
#define QCASPI_TX_TIMEOUT (1 * HZ)
#define QCASPI_QCA7K_REBOOT_TIME_MS 1000

static void
start_spi_intr_handling(struct qcaspi *qca, u16 *intr_cause)
{
        *intr_cause = 0;

        qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0);
        qcaspi_read_register(qca, SPI_REG_INTR_CAUSE, intr_cause);
        netdev_dbg(qca->net_dev, "interrupts: 0x%04x\n", *intr_cause);
}

static void
end_spi_intr_handling(struct qcaspi *qca, u16 intr_cause)
{
        u16 intr_enable = (SPI_INT_CPU_ON |
                           SPI_INT_PKT_AVLBL |
                           SPI_INT_RDBUF_ERR |
                           SPI_INT_WRBUF_ERR);

        qcaspi_write_register(qca, SPI_REG_INTR_CAUSE, intr_cause);
        qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, intr_enable);
        netdev_dbg(qca->net_dev, "acking int: 0x%04x\n", intr_cause);
}

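/* Buffer (external) access helpers. In burst mode the 16-bit command word
 * and the payload are chained into one two-transfer SPI message; in legacy
 * mode the command is issued separately via qcaspi_tx_cmd() and only the
 * payload goes through these helpers.
 */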
static u32
qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
{
        __be16 cmd;
        struct spi_message *msg = &qca->spi_msg2;
        struct spi_transfer *transfer = &qca->spi_xfer2[0];
        int ret;

        cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
        transfer->tx_buf = &cmd;
        transfer->rx_buf = NULL;
        transfer->len = QCASPI_CMD_LEN;
        transfer = &qca->spi_xfer2[1];
        transfer->tx_buf = src;
        transfer->rx_buf = NULL;
        transfer->len = len;

        ret = spi_sync(qca->spi_dev, msg);

        if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }

        return len;
}

static u32
qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
{
        struct spi_message *msg = &qca->spi_msg1;
        struct spi_transfer *transfer = &qca->spi_xfer1;
        int ret;

        transfer->tx_buf = src;
        transfer->rx_buf = NULL;
        transfer->len = len;

        ret = spi_sync(qca->spi_dev, msg);

        if (ret || (msg->actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }

        return len;
}

static u32
qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
{
        struct spi_message *msg = &qca->spi_msg2;
        __be16 cmd;
        struct spi_transfer *transfer = &qca->spi_xfer2[0];
        int ret;

        cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
        transfer->tx_buf = &cmd;
        transfer->rx_buf = NULL;
        transfer->len = QCASPI_CMD_LEN;
        transfer = &qca->spi_xfer2[1];
        transfer->tx_buf = NULL;
        transfer->rx_buf = dst;
        transfer->len = len;

        ret = spi_sync(qca->spi_dev, msg);

        if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }

        return len;
}

static u32
qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
{
        struct spi_message *msg = &qca->spi_msg1;
        struct spi_transfer *transfer = &qca->spi_xfer1;
        int ret;

        transfer->tx_buf = NULL;
        transfer->rx_buf = dst;
        transfer->len = len;

        ret = spi_sync(qca->spi_dev, msg);

        if (ret || (msg->actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }

        return len;
}

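/* Write one framed skb into the QCA7K write buffer: announce the length via
 * SPI_REG_BFR_SIZE, then push the data in chunks of at most burst_len bytes.
 */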
static int
qcaspi_tx_frame(struct qcaspi *qca, struct sk_buff *skb)
{
        u32 count;
        u32 written;
        u32 offset;
        u32 len;

        len = skb->len;

        qcaspi_write_register(qca, SPI_REG_BFR_SIZE, len);
        if (qca->legacy_mode)
                qcaspi_tx_cmd(qca, QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);

        offset = 0;
        while (len) {
                count = len;
                if (count > qca->burst_len)
                        count = qca->burst_len;

                if (qca->legacy_mode) {
                        written = qcaspi_write_legacy(qca,
                                                      skb->data + offset,
                                                      count);
                } else {
                        written = qcaspi_write_burst(qca,
                                                     skb->data + offset,
                                                     count);
                }

                if (written != count)
                        return -1;

                offset += count;
                len -= count;
        }

        return 0;
}

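/* Drain the tx ring as far as the free space reported by
 * SPI_REG_WRBUF_SPC_AVA allows; each frame consumes its length plus
 * QCASPI_HW_PKT_LEN bytes of per-packet overhead in the write buffer.
 */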
static int
qcaspi_transmit(struct qcaspi *qca)
{
        struct net_device_stats *n_stats = &qca->net_dev->stats;
        u16 available = 0;
        u32 pkt_len;
        u16 new_head;
        u16 packets = 0;

        if (qca->txr.skb[qca->txr.head] == NULL)
                return 0;

        qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);

        while (qca->txr.skb[qca->txr.head]) {
                pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;

                if (available < pkt_len) {
                        if (packets == 0)
                                qca->stats.write_buf_miss++;
                        break;
                }

                if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) {
                        qca->stats.write_err++;
                        return -1;
                }

                packets++;
                n_stats->tx_packets++;
                n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len;
                available -= pkt_len;

                /* remove the skb from the queue */
                /* XXX After inconsistent lock states netif_tx_lock()
                 * has been replaced by netif_tx_lock_bh() and so on.
                 */
                netif_tx_lock_bh(qca->net_dev);
                dev_kfree_skb(qca->txr.skb[qca->txr.head]);
                qca->txr.skb[qca->txr.head] = NULL;
                qca->txr.size -= pkt_len;
                new_head = qca->txr.head + 1;
                if (new_head >= qca->txr.count)
                        new_head = 0;
                qca->txr.head = new_head;
                if (netif_queue_stopped(qca->net_dev))
                        netif_wake_queue(qca->net_dev);
                netif_tx_unlock_bh(qca->net_dev);
        }

        return 0;
}

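/* Drain the QCA7K read buffer into rx_buffer and feed it byte by byte
 * through the framing state machine; every completed frame is handed to
 * the network stack and a fresh rx_skb is allocated for the next one.
 */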
static int
qcaspi_receive(struct qcaspi *qca)
{
        struct net_device *net_dev = qca->net_dev;
        struct net_device_stats *n_stats = &net_dev->stats;
        u16 available = 0;
        u32 bytes_read;
        u8 *cp;

        /* Allocate rx SKB if we don't have one available. */
        if (!qca->rx_skb) {
                qca->rx_skb = netdev_alloc_skb(net_dev,
                                               net_dev->mtu + VLAN_ETH_HLEN);
                if (!qca->rx_skb) {
                        netdev_dbg(net_dev, "out of RX resources\n");
                        qca->stats.out_of_mem++;
                        return -1;
                }
        }

        /* Read the packet size. */
        qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
        netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
                   available);

        if (available == 0) {
                netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
                return -1;
        }

        qcaspi_write_register(qca, SPI_REG_BFR_SIZE, available);

        if (qca->legacy_mode)
                qcaspi_tx_cmd(qca, QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);

        while (available) {
                u32 count = available;

                if (count > qca->burst_len)
                        count = qca->burst_len;

                if (qca->legacy_mode) {
                        bytes_read = qcaspi_read_legacy(qca, qca->rx_buffer,
                                                        count);
                } else {
                        bytes_read = qcaspi_read_burst(qca, qca->rx_buffer,
                                                       count);
                }

                netdev_dbg(net_dev, "available: %d, byte read: %d\n",
                           available, bytes_read);

                if (bytes_read) {
                        available -= bytes_read;
                } else {
                        qca->stats.read_err++;
                        return -1;
                }

                cp = qca->rx_buffer;

                while ((bytes_read--) && (qca->rx_skb)) {
                        s32 retcode;

                        retcode = qcafrm_fsm_decode(&qca->frm_handle,
                                                    qca->rx_skb->data,
                                                    skb_tailroom(qca->rx_skb),
                                                    *cp);
                        cp++;
                        switch (retcode) {
                        case QCAFRM_GATHER:
                        case QCAFRM_NOHEAD:
                                break;
                        case QCAFRM_NOTAIL:
                                netdev_dbg(net_dev, "no RX tail\n");
                                n_stats->rx_errors++;
                                n_stats->rx_dropped++;
                                break;
                        case QCAFRM_INVLEN:
                                netdev_dbg(net_dev, "invalid RX length\n");
                                n_stats->rx_errors++;
                                n_stats->rx_dropped++;
                                break;
                        default:
                                qca->rx_skb->dev = qca->net_dev;
                                n_stats->rx_packets++;
                                n_stats->rx_bytes += retcode;
                                skb_put(qca->rx_skb, retcode);
                                qca->rx_skb->protocol = eth_type_trans(
                                        qca->rx_skb, qca->rx_skb->dev);
                                qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
                                netif_rx_ni(qca->rx_skb);
                                qca->rx_skb = netdev_alloc_skb(net_dev,
                                        net_dev->mtu + VLAN_ETH_HLEN);
                                if (!qca->rx_skb) {
                                        netdev_dbg(net_dev, "out of RX resources\n");
                                        n_stats->rx_errors++;
                                        qca->stats.out_of_mem++;
                                        break;
                                }
                        }
                }
        }

        return 0;
}

/* Check that the tx ring only stores as many bytes as
 * fit into the internal QCA buffer.
 */

static int
qcaspi_tx_ring_has_space(struct tx_ring *txr)
{
        if (txr->skb[txr->tail])
                return 0;

        return (txr->size + QCAFRM_ETHMAXLEN < QCASPI_HW_BUF_LEN) ? 1 : 0;
}

/* Flush the tx ring. This function is only safe to
 * call from the qcaspi_spi_thread.
 */

static void
qcaspi_flush_tx_ring(struct qcaspi *qca)
{
        int i;

        /* XXX After inconsistent lock states netif_tx_lock()
         * has been replaced by netif_tx_lock_bh() and so on.
         */
        netif_tx_lock_bh(qca->net_dev);
        for (i = 0; i < TX_RING_MAX_LEN; i++) {
                if (qca->txr.skb[i]) {
                        dev_kfree_skb(qca->txr.skb[i]);
                        qca->txr.skb[i] = NULL;
                        qca->net_dev->stats.tx_dropped++;
                }
        }
        qca->txr.tail = 0;
        qca->txr.head = 0;
        qca->txr.size = 0;
        netif_tx_unlock_bh(qca->net_dev);
}

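/* Synchronization state machine: from UNKNOWN a slave reset is triggered,
 * RESET waits for the CPU_ON interrupt (bounded by QCASPI_RESET_TIMEOUT),
 * and READY is entered once a valid signature is seen and the write buffer
 * is empty.
 */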
static void
qcaspi_qca7k_sync(struct qcaspi *qca, int event)
{
        u16 signature = 0;
        u16 spi_config;
        u16 wrbuf_space = 0;
        static u16 reset_count;

        if (event == QCASPI_EVENT_CPUON) {
                /* Read signature twice, if not valid
                 * go back to unknown state.
                 */
                qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
                qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
                if (signature != QCASPI_GOOD_SIGNATURE) {
                        qca->sync = QCASPI_SYNC_UNKNOWN;
                        netdev_dbg(qca->net_dev, "sync: got CPU on, but signature was invalid, restart\n");
                } else {
                        /* ensure that the WRBUF is empty */
                        qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA,
                                             &wrbuf_space);
                        if (wrbuf_space != QCASPI_HW_BUF_LEN) {
                                netdev_dbg(qca->net_dev, "sync: got CPU on, but wrbuf not empty. reset!\n");
                                qca->sync = QCASPI_SYNC_UNKNOWN;
                        } else {
                                netdev_dbg(qca->net_dev, "sync: got CPU on, now in sync\n");
                                qca->sync = QCASPI_SYNC_READY;
                                return;
                        }
                }
        }

        switch (qca->sync) {
        case QCASPI_SYNC_READY:
                /* Read signature, if not valid go to unknown state. */
                qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
                if (signature != QCASPI_GOOD_SIGNATURE) {
                        qca->sync = QCASPI_SYNC_UNKNOWN;
                        netdev_dbg(qca->net_dev, "sync: bad signature, restart\n");
                        /* don't reset right away */
                        return;
                }
                break;
        case QCASPI_SYNC_UNKNOWN:
                /* Read signature, if not valid stay in unknown state */
                qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
                if (signature != QCASPI_GOOD_SIGNATURE) {
                        netdev_dbg(qca->net_dev, "sync: could not read signature to reset device, retry.\n");
                        return;
                }

                /* TODO: use GPIO to reset QCA7000 in legacy mode */
                netdev_dbg(qca->net_dev, "sync: resetting device.\n");
                qcaspi_read_register(qca, SPI_REG_SPI_CONFIG, &spi_config);
                spi_config |= QCASPI_SLAVE_RESET_BIT;
                qcaspi_write_register(qca, SPI_REG_SPI_CONFIG, spi_config);

                qca->sync = QCASPI_SYNC_RESET;
                qca->stats.trig_reset++;
                reset_count = 0;
                break;
        case QCASPI_SYNC_RESET:
                reset_count++;
                netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
                           reset_count);
                if (reset_count >= QCASPI_RESET_TIMEOUT) {
                        /* reset did not seem to take place, try again */
                        qca->sync = QCASPI_SYNC_UNKNOWN;
                        qca->stats.reset_timeout++;
                        netdev_dbg(qca->net_dev, "sync: reset timeout, restarting process.\n");
                }
                break;
        }
}

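/* The SPI worker thread. intr_req counts interrupts signalled by the ISR
 * and intr_svc those already serviced; the thread only sleeps when both
 * match, no tx skb is pending and the device is in sync.
 */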
static int
qcaspi_spi_thread(void *data)
{
        struct qcaspi *qca = data;
        u16 intr_cause = 0;

        netdev_info(qca->net_dev, "SPI thread created\n");
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if ((qca->intr_req == qca->intr_svc) &&
                    (qca->txr.skb[qca->txr.head] == NULL) &&
                    (qca->sync == QCASPI_SYNC_READY))
                        schedule();

                set_current_state(TASK_RUNNING);

                netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
                           qca->intr_req - qca->intr_svc,
                           qca->txr.skb[qca->txr.head]);

                qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);

                if (qca->sync != QCASPI_SYNC_READY) {
                        netdev_dbg(qca->net_dev, "sync: not ready %u, turn off carrier and flush\n",
                                   (unsigned int)qca->sync);
                        netif_stop_queue(qca->net_dev);
                        netif_carrier_off(qca->net_dev);
                        qcaspi_flush_tx_ring(qca);
                        msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
                }

                if (qca->intr_svc != qca->intr_req) {
                        qca->intr_svc = qca->intr_req;
                        start_spi_intr_handling(qca, &intr_cause);

                        if (intr_cause & SPI_INT_CPU_ON) {
                                qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON);

                                /* not synced. */
                                if (qca->sync != QCASPI_SYNC_READY)
                                        continue;

                                qca->stats.device_reset++;
                                netif_wake_queue(qca->net_dev);
                                netif_carrier_on(qca->net_dev);
                        }

                        if (intr_cause & SPI_INT_RDBUF_ERR) {
                                /* restart sync */
                                netdev_dbg(qca->net_dev, "===> rdbuf error!\n");
                                qca->stats.read_buf_err++;
                                qca->sync = QCASPI_SYNC_UNKNOWN;
                                continue;
                        }

                        if (intr_cause & SPI_INT_WRBUF_ERR) {
                                /* restart sync */
                                netdev_dbg(qca->net_dev, "===> wrbuf error!\n");
                                qca->stats.write_buf_err++;
                                qca->sync = QCASPI_SYNC_UNKNOWN;
                                continue;
                        }

                        /* can only handle other interrupts
                         * if sync has occurred
                         */
                        if (qca->sync == QCASPI_SYNC_READY) {
                                if (intr_cause & SPI_INT_PKT_AVLBL)
                                        qcaspi_receive(qca);
                        }

                        end_spi_intr_handling(qca, intr_cause);
                }

                if (qca->sync == QCASPI_SYNC_READY)
                        qcaspi_transmit(qca);
        }
        set_current_state(TASK_RUNNING);
        netdev_info(qca->net_dev, "SPI thread exit\n");

        return 0;
}

static irqreturn_t
qcaspi_intr_handler(int irq, void *data)
{
        struct qcaspi *qca = data;

        qca->intr_req++;
        if (qca->spi_thread &&
            qca->spi_thread->state != TASK_RUNNING)
                wake_up_process(qca->spi_thread);

        return IRQ_HANDLED;
}

int
qcaspi_netdev_open(struct net_device *dev)
{
        struct qcaspi *qca = netdev_priv(dev);
        int ret = 0;

        if (!qca)
                return -EINVAL;

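        /* Start with intr_req ahead of intr_svc so the SPI thread makes one
         * pass through the sync state machine right away, even before the
         * first hardware interrupt arrives.
         */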
        qca->intr_req = 1;
        qca->intr_svc = 0;
        qca->sync = QCASPI_SYNC_UNKNOWN;
        qcafrm_fsm_init(&qca->frm_handle);

        qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
                                      qca, "%s", dev->name);

        if (IS_ERR(qca->spi_thread)) {
                netdev_err(dev, "%s: unable to start kernel thread.\n",
                           QCASPI_DRV_NAME);
                return PTR_ERR(qca->spi_thread);
        }

        ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0,
                          dev->name, qca);
        if (ret) {
                netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
                           QCASPI_DRV_NAME, qca->spi_dev->irq, ret);
                kthread_stop(qca->spi_thread);
                return ret;
        }

        netif_start_queue(qca->net_dev);

        return 0;
}

int
qcaspi_netdev_close(struct net_device *dev)
{
        struct qcaspi *qca = netdev_priv(dev);

        netif_stop_queue(dev);

        qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0);
        free_irq(qca->spi_dev->irq, qca);

        kthread_stop(qca->spi_thread);
        qca->spi_thread = NULL;
        qcaspi_flush_tx_ring(qca);

        return 0;
}

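/* Framing happens in the xmit path: short frames are padded to the Ethernet
 * minimum, the QCA framing header and footer are added, and the skb is
 * queued on the tx ring for the SPI thread to push out.
 */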
static netdev_tx_t
qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
        u32 frame_len;
        u8 *ptmp;
        struct qcaspi *qca = netdev_priv(dev);
        u16 new_tail;
        struct sk_buff *tskb;
        u8 pad_len = 0;

        if (skb->len < QCAFRM_ETHMINLEN)
                pad_len = QCAFRM_ETHMINLEN - skb->len;

        if (qca->txr.skb[qca->txr.tail]) {
                netdev_warn(qca->net_dev, "queue was unexpectedly full!\n");
                netif_stop_queue(qca->net_dev);
                qca->stats.ring_full++;
                return NETDEV_TX_BUSY;
        }

        if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) ||
            (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) {
                tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN,
                                       QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC);
                if (!tskb) {
                        netdev_dbg(qca->net_dev, "could not allocate tx_buff\n");
                        qca->stats.out_of_mem++;
                        return NETDEV_TX_BUSY;
                }
                dev_kfree_skb(skb);
                skb = tskb;
        }

        frame_len = skb->len + pad_len;

        ptmp = skb_push(skb, QCAFRM_HEADER_LEN);
        qcafrm_create_header(ptmp, frame_len);

        if (pad_len) {
                ptmp = skb_put(skb, pad_len);
                memset(ptmp, 0, pad_len);
        }

        ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
        qcafrm_create_footer(ptmp);

        netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n",
                   skb->len);

        qca->txr.size += skb->len + QCASPI_HW_PKT_LEN;

        new_tail = qca->txr.tail + 1;
        if (new_tail >= qca->txr.count)
                new_tail = 0;

        qca->txr.skb[qca->txr.tail] = skb;
        qca->txr.tail = new_tail;

        if (!qcaspi_tx_ring_has_space(&qca->txr)) {
                netif_stop_queue(qca->net_dev);
                qca->stats.ring_full++;
        }

        dev->trans_start = jiffies;

        if (qca->spi_thread &&
            qca->spi_thread->state != TASK_RUNNING)
                wake_up_process(qca->spi_thread);

        return NETDEV_TX_OK;
}

static void
qcaspi_netdev_tx_timeout(struct net_device *dev)
{
        struct qcaspi *qca = netdev_priv(dev);

        netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
                    jiffies, jiffies - dev->trans_start);
        qca->net_dev->stats.tx_errors++;
        /* wake the queue if there is room */
        if (qcaspi_tx_ring_has_space(&qca->txr))
                netif_wake_queue(dev);
}

static int
qcaspi_netdev_init(struct net_device *dev)
{
        struct qcaspi *qca = netdev_priv(dev);

        dev->mtu = QCASPI_MTU;
        dev->type = ARPHRD_ETHER;
        qca->clkspeed = qcaspi_clkspeed;
        qca->burst_len = qcaspi_burst_len;
        qca->spi_thread = NULL;
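        /* rx_buffer receives raw burst data before frame decoding; it is
         * sized for roughly four maximum-size framed packets (the extra
         * 4 bytes per frame are presumably just slack).
         */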
        qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
                            QCAFRM_FOOTER_LEN + 4) * 4;

        memset(&qca->stats, 0, sizeof(struct qcaspi_stats));

        qca->rx_buffer = kmalloc(qca->buffer_size, GFP_KERNEL);
        if (!qca->rx_buffer)
                return -ENOBUFS;

        qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN);
        if (!qca->rx_skb) {
                kfree(qca->rx_buffer);
                netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
                return -ENOBUFS;
        }

        return 0;
}

static void
qcaspi_netdev_uninit(struct net_device *dev)
{
        struct qcaspi *qca = netdev_priv(dev);

        kfree(qca->rx_buffer);
        qca->buffer_size = 0;
        if (qca->rx_skb)
                dev_kfree_skb(qca->rx_skb);
}

static int
qcaspi_netdev_change_mtu(struct net_device *dev, int new_mtu)
{
        if ((new_mtu < QCAFRM_ETHMINMTU) || (new_mtu > QCAFRM_ETHMAXMTU))
                return -EINVAL;

        dev->mtu = new_mtu;

        return 0;
}

static const struct net_device_ops qcaspi_netdev_ops = {
        .ndo_init = qcaspi_netdev_init,
        .ndo_uninit = qcaspi_netdev_uninit,
        .ndo_open = qcaspi_netdev_open,
        .ndo_stop = qcaspi_netdev_close,
        .ndo_start_xmit = qcaspi_netdev_xmit,
        .ndo_change_mtu = qcaspi_netdev_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_tx_timeout = qcaspi_netdev_tx_timeout,
        .ndo_validate_addr = eth_validate_addr,
};

static void
qcaspi_netdev_setup(struct net_device *dev)
{
        struct qcaspi *qca = NULL;

        dev->netdev_ops = &qcaspi_netdev_ops;
        qcaspi_set_ethtool_ops(dev);
        dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
        dev->flags = IFF_MULTICAST;
        dev->tx_queue_len = 100;

        qca = netdev_priv(dev);
        memset(qca, 0, sizeof(struct qcaspi));

        memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
        memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);

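        /* spi_msg1 carries a single transfer (used by the legacy-mode data
         * phase), while spi_msg2 chains the 16-bit command word with the
         * data transfer for burst-mode buffer access.
         */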
        spi_message_init(&qca->spi_msg1);
        spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);

        spi_message_init(&qca->spi_msg2);
        spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
        spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);

        memset(&qca->txr, 0, sizeof(qca->txr));
        qca->txr.count = TX_RING_MAX_LEN;
}

static const struct of_device_id qca_spi_of_match[] = {
        { .compatible = "qca,qca7000" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_spi_of_match);

static int
qca_spi_probe(struct spi_device *spi_device)
{
        struct qcaspi *qca = NULL;
        struct net_device *qcaspi_devs = NULL;
        u8 legacy_mode = 0;
        u16 signature;
        const char *mac;

        if (!spi_device->dev.of_node) {
                dev_err(&spi_device->dev, "Missing device tree\n");
                return -EINVAL;
        }

        legacy_mode = of_property_read_bool(spi_device->dev.of_node,
                                            "qca,legacy-mode");

        if (qcaspi_clkspeed == 0) {
                if (spi_device->max_speed_hz)
                        qcaspi_clkspeed = spi_device->max_speed_hz;
                else
                        qcaspi_clkspeed = QCASPI_CLK_SPEED;
        }

        if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
            (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
                dev_info(&spi_device->dev, "Invalid clkspeed: %d\n",
                         qcaspi_clkspeed);
                return -EINVAL;
        }

        if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
            (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
                dev_info(&spi_device->dev, "Invalid burst len: %d\n",
                         qcaspi_burst_len);
                return -EINVAL;
        }

        if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
            (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
                dev_info(&spi_device->dev, "Invalid pluggable: %d\n",
                         qcaspi_pluggable);
                return -EINVAL;
        }

        dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
                 QCASPI_DRV_VERSION,
                 qcaspi_clkspeed,
                 qcaspi_burst_len,
                 qcaspi_pluggable);

        spi_device->mode = SPI_MODE_3;
        spi_device->max_speed_hz = qcaspi_clkspeed;
        if (spi_setup(spi_device) < 0) {
                dev_err(&spi_device->dev, "Unable to setup SPI device\n");
                return -EFAULT;
        }

        qcaspi_devs = alloc_etherdev(sizeof(struct qcaspi));
        if (!qcaspi_devs)
                return -ENOMEM;

        qcaspi_netdev_setup(qcaspi_devs);

        qca = netdev_priv(qcaspi_devs);
        if (!qca) {
                free_netdev(qcaspi_devs);
                dev_err(&spi_device->dev, "Fail to retrieve private structure\n");
                return -ENOMEM;
        }
        qca->net_dev = qcaspi_devs;
        qca->spi_dev = spi_device;
        qca->legacy_mode = legacy_mode;

        mac = of_get_mac_address(spi_device->dev.of_node);

        if (mac)
                ether_addr_copy(qca->net_dev->dev_addr, mac);

        if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
                eth_hw_addr_random(qca->net_dev);
                dev_info(&spi_device->dev, "Using random MAC address: %pM\n",
                         qca->net_dev->dev_addr);
        }

        netif_carrier_off(qca->net_dev);

        if (!qcaspi_pluggable) {
                qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
                qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);

                if (signature != QCASPI_GOOD_SIGNATURE) {
                        dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n",
                                signature);
                        free_netdev(qcaspi_devs);
                        return -EFAULT;
                }
        }

        if (register_netdev(qcaspi_devs)) {
                dev_info(&spi_device->dev, "Unable to register net device %s\n",
                         qcaspi_devs->name);
                free_netdev(qcaspi_devs);
                return -EFAULT;
        }

        spi_set_drvdata(spi_device, qcaspi_devs);

        qcaspi_init_device_debugfs(qca);

        return 0;
}

static int
qca_spi_remove(struct spi_device *spi_device)
{
        struct net_device *qcaspi_devs = spi_get_drvdata(spi_device);
        struct qcaspi *qca = netdev_priv(qcaspi_devs);

        qcaspi_remove_device_debugfs(qca);

        unregister_netdev(qcaspi_devs);
        free_netdev(qcaspi_devs);

        return 0;
}

static const struct spi_device_id qca_spi_id[] = {
        { "qca7000", 0 },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, qca_spi_id);

static struct spi_driver qca_spi_driver = {
        .driver = {
                .name = QCASPI_DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table = qca_spi_of_match,
        },
        .id_table = qca_spi_id,
        .probe = qca_spi_probe,
        .remove = qca_spi_remove,
};
module_spi_driver(qca_spi_driver);

MODULE_DESCRIPTION("Qualcomm Atheros SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCASPI_DRV_VERSION);