/* Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following Disclaimer as comments in the code as
 *    well as in the documentation and/or other materials provided with the
 *    distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following Disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS          1000
#define MAX_NUM_WRITE_RETRIES           2

/* MAC defines */
#define COUNTER_WRAP_16_BIT             0x10000
#define COUNTER_WRAP_12_BIT             0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE               0x400   /* 1024 words of internal memory */
#define INTERNAL_MEM_RX_OFFSET          0x1FF   /* 50% Tx, 50% Rx */

/* ISR defines */
/* For interrupts, normal running is:
 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 * watchdog_interrupt & txdma_xfer_done
 *
 * When flow control is enabled for either Tx or bi-directional operation, we
 * additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the free
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE                0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE              0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW      0xfff6bfd7
 */
#define INT_MASK_ENABLE                 0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW         0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE             60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST              128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED     0x0001
#define ET131X_PACKET_TYPE_MULTICAST    0x0002
#define ET131X_PACKET_TYPE_BROADCAST    0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS  0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT               (1 * HZ)
#define NIC_SEND_HANG_THRESHOLD         0

/* MP_ADAPTER flags */
#define FMP_ADAPTER_INTERRUPT_IN_USE    0x00000008

/* MP_SHARED flags */
#define FMP_ADAPTER_LOWER_POWER         0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR   0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR      0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK      0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS          0xA4
#define ET1310_PCI_EEPROM_STATUS        0xB2
#define ET1310_PCI_ACK_NACK             0xC0
#define ET1310_PCI_REPLAY               0xC2
#define ET1310_PCI_L0L1LATENCY          0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG        0xED00  /* ET1310 1000 Base-T */
#define ET131X_PCI_DEVICE_ID_FAST       0xED01  /* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO                 1000

#define PARM_RX_NUM_BUFS_DEF            4
#define PARM_RX_TIME_INT_DEF            10
#define PARM_RX_MEM_END_DEF             0x2bc
#define PARM_TX_TIME_INT_DEF            40
#define PARM_TX_NUM_BUFS_DEF            4
#define PARM_DMA_CACHE_DEF              0

/* RX defines */
#define FBR_CHUNKS                      32
#define MAX_DESC_PER_RING_RX            1024

/* number of RFDs - default and min */
#define RFD_LOW_WATER_MARK              40
#define NIC_DEFAULT_NUM_RFD             1024
#define NUM_FBRS                        2

#define MAX_PACKETS_HANDLED             256

#define ALCATEL_MULTICAST_PKT           0x01000000
#define ALCATEL_BROADCAST_PKT           0x02000000

/* Free Buffer Descriptors */
struct fbr_desc {
        u32 addr_lo;
        u32 addr_hi;
        u32 word2;      /* Bits 10-31 reserved, 0-9 descriptor index */
};
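
/* Illustrative sketch (not part of the original driver): populating a free
 * buffer descriptor from a DMA address and a ring index, following the word2
 * layout documented above (bits 0-9 hold the descriptor index, bits 10-31
 * are reserved). The helper name and the 0x3FF mask derived from that layout
 * are our own.
 */
static inline void fbr_desc_fill_example(struct fbr_desc *fd, dma_addr_t pa,
                                         u32 index)
{
        fd->addr_lo = lower_32_bits(pa);
        fd->addr_hi = upper_32_bits(pa);
        fd->word2 = index & 0x3FF;      /* keep the reserved bits clear */
}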

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp                        hash pass
 * 1: ipa                       IP checksum assist
 * 2: ipp                       IP checksum pass
 * 3: tcpa                      TCP checksum assist
 * 4: tcpp                      TCP checksum pass
 * 5: wol                       WOL Event
 * 6: rxmac_error               RXMAC Error Indicator
 * 7: drop                      Drop packet
 * 8: ft                        Frame Truncated
 * 9: jp                        Jumbo Packet
 * 10: vp                       VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped     e.g. IFG too small on previous
 * 17: asw_RX_DV_event          short receive event detected
 * 18: asw_false_carrier_event  bad carrier since last good packet
 * 19: asw_code_err             one or more nibbles signalled as errors
 * 20: asw_CRC_err              CRC error
 * 21: asw_len_chk_err          frame length field incorrect
 * 22: asw_too_long             frame length > 1518 bytes
 * 23: asw_OK                   valid CRC + no code error
 * 24: asw_multicast            has a multicast address
 * 25: asw_broadcast            has a broadcast address
 * 26: asw_dribble_nibble       spurious bits after EOP
 * 27: asw_control_frame        is a control frame
 * 28: asw_pause_frame          is a pause frame
 * 29: asw_unsupported_op       unsupported OP code
 * 30: asw_VLAN_tag             VLAN tag detected
 * 31: asw_long_evt             Rx long event
 *
 * Word 1:
 * 0-15: length                 length in bytes
 * 16-25: bi                    Buffer Index
 * 26-27: ri                    Ring Index
 * 28-31: reserved
 */
struct pkt_stat_desc {
        u32 word0;
        u32 word1;
};
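
/* Illustrative sketch (not part of the original driver): extracting the
 * fields of word1 of a packet status descriptor per the bit layout above.
 * The helper names and the masks/shifts derived from that layout are our
 * own; the driver itself uses its own masking inline where needed.
 */
static inline u32 psd_length_example(const struct pkt_stat_desc *psd)
{
        return psd->word1 & 0xFFFF;             /* bits 0-15: length */
}

static inline u32 psd_buffer_index_example(const struct pkt_stat_desc *psd)
{
        return (psd->word1 >> 16) & 0x3FF;      /* bits 16-25: buffer index */
}

static inline u32 psd_ring_index_example(const struct pkt_stat_desc *psd)
{
        return (psd->word1 >> 26) & 0x3;        /* bits 26-27: ring index */
}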

/* Layout of the RX DMA status words */

/* rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/* rx status word 1 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 1 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSR offset
 * bit 28 PSR wrap
 * bit 29-31 unused
 */

/* struct rx_status_block represents the status of the Rx DMA engine. It
 * sits in free memory and is pointed to by registers 0x101c / 0x1020.
 */
struct rx_status_block {
        u32 word0;
        u32 word1;
};
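
/* Illustrative sketch (not part of the original driver): decoding the two
 * status words per the layouts documented above. The helper names and the
 * masks/shifts are our own reading of those layouts; the driver itself uses
 * register-specific macros from et131x.h for this.
 */
static inline u32 rx_stat_fbr1_offset_example(const struct rx_status_block *s)
{
        return s->word0 & 0x3FF;                /* bits 0-9: FBR1 offset */
}

static inline u32 rx_stat_psr_offset_example(const struct rx_status_block *s)
{
        return (s->word1 >> 16) & 0xFFF;        /* bits 16-27: PSR offset */
}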

/* Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
        void *virt[MAX_DESC_PER_RING_RX];
        u32 bus_high[MAX_DESC_PER_RING_RX];
        u32 bus_low[MAX_DESC_PER_RING_RX];
        void *ring_virtaddr;
        dma_addr_t ring_physaddr;
        void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
        dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
        u32 local_full;
        u32 num_entries;
        dma_addr_t buffsize;
};

/* struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 */
struct rx_ring {
        struct fbr_lookup *fbr[NUM_FBRS];
        void *ps_ring_virtaddr;
        dma_addr_t ps_ring_physaddr;
        u32 local_psr_full;
        u32 psr_entries;

        struct rx_status_block *rx_status_block;
        dma_addr_t rx_status_bus;

        struct list_head recv_list;
        u32 num_ready_recv;

        u32 num_rfd;

        bool unfinished_receives;
};

/* TX defines */
/* word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */
#define TXDESC_FLAG_LASTPKT             0x0001
#define TXDESC_FLAG_FIRSTPKT            0x0002
#define TXDESC_FLAG_INTPROC             0x0004

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
        u32 addr_hi;
        u32 addr_lo;
        u32 len_vlan;   /* control words how to xmit the */
        u32 flags;      /* data (detailed above) */
};
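
/* Illustrative sketch (not part of the original driver): filling a Tx
 * descriptor for a single-fragment packet per the word 2/3 layouts above,
 * using the TXDESC_FLAG_* bits defined above. The helper name and the
 * 0xFFFF length mask derived from the layout are our own.
 */
static inline void tx_desc_fill_example(struct tx_desc *desc, dma_addr_t pa,
                                        u32 len)
{
        desc->addr_hi = upper_32_bits(pa);
        desc->addr_lo = lower_32_bits(pa);
        desc->len_vlan = len & 0xFFFF;  /* bits 0-15: packet length */
        /* a one-fragment packet is both first and last in the sequence */
        desc->flags = TXDESC_FLAG_FIRSTPKT | TXDESC_FLAG_LASTPKT |
                      TXDESC_FLAG_INTPROC;
}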

/* The status of the Tx DMA engine sits in free memory and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type.
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
        struct tcb *next;       /* Next entry in ring */
        u32 count;              /* Used to spot stuck/lost packets */
        u32 stale;              /* Used to spot stuck/lost packets */
        struct sk_buff *skb;    /* Network skb we are tied to */
        u32 index;              /* Ring indexes */
        u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
        /* TCB (Transmit Control Block) memory and lists */
        struct tcb *tcb_ring;

        /* List of TCBs that are ready to be used */
        struct tcb *tcb_qhead;
        struct tcb *tcb_qtail;

        /* list of TCBs that are currently being sent. */
        struct tcb *send_head;
        struct tcb *send_tail;
        int used;

        /* The actual descriptor ring */
        struct tx_desc *tx_desc_ring;
        dma_addr_t tx_desc_ring_pa;

        /* send_idx indicates where we last wrote to in the descriptor ring. */
        u32 send_idx;

        /* The location of the write-back status block */
        u32 *tx_status;
        dma_addr_t tx_status_pa;

        /* Packets since the last IRQ: used for interrupt coalescing */
        int since_irq;
};

/* Do not change these values: if changed, the corresponding values in the
 * TXdma and RXdma engines must be changed as well.
 */
#define NUM_DESC_PER_RING_TX            512
#define NUM_TCB                         64

/* These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD                 1000

#define LO_MARK_PERCENT_FOR_PSR         15
#define LO_MARK_PERCENT_FOR_RX          15

/* RFD (Receive Frame Descriptor) */
struct rfd {
        struct list_head list_node;
        struct sk_buff *skb;
        u32 len;        /* total size of receive frame */
        u16 bufferindex;
        u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH                       0
#define FLOW_TXONLY                     1
#define FLOW_RXONLY                     2
#define FLOW_NONE                       3

/* Struct to define some device statistics */
struct ce_stats {
        u32 multicast_pkts_rcvd;
        u32 rcvd_pkts_dropped;

        u32 tx_underflows;
        u32 tx_collisions;
        u32 tx_excessive_collisions;
        u32 tx_first_collisions;
        u32 tx_late_collisions;
        u32 tx_max_pkt_errs;
        u32 tx_deferred;

        u32 rx_overflows;
        u32 rx_length_errs;
        u32 rx_align_errs;
        u32 rx_crc_errs;
        u32 rx_code_violations;
        u32 rx_other_errs;

        u32 interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
        struct net_device *netdev;
        struct pci_dev *pdev;
        struct mii_bus *mii_bus;
        struct phy_device *phydev;
        struct napi_struct napi;

        /* Flags that indicate current state of the adapter */
        u32 flags;

        /* local link state, to determine if a state change has occurred */
        int link;

        /* Configuration */
        u8 rom_addr[ETH_ALEN];
        u8 addr[ETH_ALEN];
        bool has_eeprom;
        u8 eeprom_data[2];

        spinlock_t tcb_send_qlock;      /* protects the tx_ring send tcb list */
        spinlock_t tcb_ready_qlock;     /* protects the tx_ring ready tcb list */
        spinlock_t rcv_lock;            /* protects the rx_ring receive list */

        /* Packet Filter and look ahead size */
        u32 packet_filter;

        /* multicast list */
        u32 multicast_addr_count;
        u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

        /* Pointer to the device's PCI register space */
        struct address_map __iomem *regs;

        /* Registry parameters */
        u8 wanted_flow;                 /* Flow we want for 802.3x flow control */
        u32 registry_jumbo_packet;      /* Max supported ethernet packet size */

        /* Derived from the registry: */
        u8 flow;                        /* flow control validated by the far-end */

        /* Minimize init-time */
        struct timer_list error_timer;

        /* Puts the PHY into coma mode when booting with no cable plugged in
         * after 5 seconds.
         */
        u8 boot_coma;

        /* Tx Memory Variables */
        struct tx_ring tx_ring;

        /* Rx Memory Variables */
        struct rx_ring rx_ring;

        struct ce_stats stats;
};

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
        u32 reg;
        int i;

        /* 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
         * bits 7,1:0 all equal to 1, at least once after reset.
         * Subsequent operations need only to check that bits 1:0 are equal
         * to 1 prior to starting a single byte read/write.
         */
        for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
                if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
                        return -EIO;

                /* I2C idle and Phy Queue Avail both true */
                if ((reg & 0x3000) == 0x3000) {
                        if (status)
                                *status = reg;
                        return reg & 0xFF;
                }
        }
        return -ETIMEDOUT;
}

static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
        struct pci_dev *pdev = adapter->pdev;
        int index = 0;
        int retries;
        int err = 0;
        int writeok = 0;
        u32 status;
        u32 val = 0;

        /* For an EEPROM, an I2C single byte write is defined as a START
         * condition followed by the device address, EEPROM address, one byte
         * of data and a STOP condition. The STOP condition will trigger the
         * EEPROM's internally timed write cycle to the nonvolatile memory.
         * All inputs are disabled during this write cycle and the EEPROM will
         * not respond to any access until the internal write is complete.
         */
        err = eeprom_wait_ready(pdev, NULL);
        if (err < 0)
                return err;

        /* 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
         * and bits 1:0 both =0. Bit 5 should be set according to the
         * type of EEPROM being accessed (1=two byte addressing, 0=one
         * byte addressing).
         */
        if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
                                  LBCIF_CONTROL_LBCIF_ENABLE |
                                  LBCIF_CONTROL_I2C_WRITE))
                return -EIO;

        /* Prepare EEPROM address for Step 3 */
        for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
                if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
                        break;
                /* Write the data to the LBCIF Data Register (the I2C write
                 * will begin).
                 */
                if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
                        break;
                /* Monitor bits 1:0 of the LBCIF Status Register. When bits
                 * 1:0 are both equal to 1, the I2C write has completed and the
                 * internal write cycle of the EEPROM is about to start.
                 * (bits 1:0 = 01 is a legal state while waiting for both to
                 * equal 1, but bits 1:0 = 10 is invalid and implies that
                 * something is broken).
                 */
                err = eeprom_wait_ready(pdev, &status);
                if (err < 0)
                        return 0;

                /* Check bit 3 of the LBCIF Status Register. If equal to 1,
                 * an error has occurred. Don't break here if we are revision
                 * 1; this is so we do a blind write to work around a load bug.
                 */
                if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
                    adapter->pdev->revision == 0)
                        break;

                /* Check bit 2 of the LBCIF Status Register. If equal to 1,
                 * an ACK error has occurred on the address phase of the
                 * write. This could be due to an actual hardware failure or
                 * the EEPROM may still be in its internal write cycle from a
                 * previous write. This write operation was ignored and must
                 * be repeated later.
                 */
                if (status & LBCIF_STATUS_ACK_ERROR) {
                        udelay(10);
                        continue;
                }

                writeok = 1;
                break;
        }

        udelay(10);

        while (1) {
                if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
                                          LBCIF_CONTROL_LBCIF_ENABLE))
                        writeok = 0;

                /* Do reads until the internal ACK_ERROR goes away, meaning
                 * the write has completed.
                 */
                do {
                        pci_write_config_dword(pdev,
                                               LBCIF_ADDRESS_REGISTER,
                                               addr);
                        do {
                                pci_read_config_dword(pdev,
                                                      LBCIF_DATA_REGISTER,
                                                      &val);
                        } while ((val & 0x00010000) == 0);
                } while (val & 0x00040000);

                if ((val & 0xFF00) != 0xC000 || index == 10000)
                        break;
                index++;
        }
        return writeok ? 0 : -EIO;
}

static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
        struct pci_dev *pdev = adapter->pdev;
        int err;
        u32 status;

        /* A single byte read is similar to the single byte write, with the
         * exception of the data flow:
         */
        err = eeprom_wait_ready(pdev, NULL);
        if (err < 0)
                return err;
        /* Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
         * and bits 1:0 both =0. Bit 5 should be set according to the type
         * of EEPROM being accessed (1=two byte addressing, 0=one byte
         * addressing).
         */
        if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
                                  LBCIF_CONTROL_LBCIF_ENABLE))
                return -EIO;
        /* Write the address to the LBCIF Address Register (I2C read will
         * begin).
         */
        if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
                return -EIO;
        /* Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
         * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
         * has occurred).
         */
        err = eeprom_wait_ready(pdev, &status);
        if (err < 0)
                return err;
        /* Regardless of error status, read data byte from LBCIF Data
         * Register.
         */
        *pdata = err;

        return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u8 eestatus;

        pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

        /* THIS IS A WORKAROUND:
         * We need to read this register twice to get the card running on an
         * LG M1 Express Dual. An msleep before this read did not help, but
         * reading the register a second time does.
         */
        if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
                dev_err(&pdev->dev,
                        "Could not read PCI config space for EEPROM Status\n");
                return -EIO;
        }

        /* Determine if the error(s) we care about are present. If they are
         * present we need to fail.
         */
        if (eestatus & 0x4C) {
                int write_failed = 0;

                if (pdev->revision == 0x01) {
                        int i;
                        static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

                        /* Re-write the first 4 bytes if we have an eeprom
                         * present and the revision id is 1, this fixes the
                         * corruption seen with 1310 B Silicon
                         */
                        for (i = 0; i < 3; i++)
                                if (eeprom_write(adapter, i, eedata[i]) < 0)
                                        write_failed = 1;
                }
                if (pdev->revision != 0x01 || write_failed) {
                        dev_err(&pdev->dev,
                                "Fatal EEPROM Status Error - 0x%04x\n",
                                eestatus);

                        /* This error could mean that there was an error
                         * reading the eeprom or that the eeprom doesn't exist.
                         * We will treat each case the same and not try to
                         * gather additional information that normally would
                         * come from the eeprom, like MAC Address
                         */
                        adapter->has_eeprom = 0;
                        return -EIO;
                }
        }
        adapter->has_eeprom = 1;

        /* Read the EEPROM for information regarding LED behavior. Refer to
         * et131x_xcvr_init() for its use.
         */
        eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
        eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

        if (adapter->eeprom_data[0] != 0xcd)
                /* Disable all optional features */
                adapter->eeprom_data[1] = 0x00;

        return 0;
}

static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
        /* Setup the receive dma configuration register for normal operation */
        u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
        struct rx_ring *rx_ring = &adapter->rx_ring;

        if (rx_ring->fbr[1]->buffsize == 4096)
                csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
        else if (rx_ring->fbr[1]->buffsize == 8192)
                csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
        else if (rx_ring->fbr[1]->buffsize == 16384)
                csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

        csr |= ET_RXDMA_CSR_FBR0_ENABLE;
        if (rx_ring->fbr[0]->buffsize == 256)
                csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
        else if (rx_ring->fbr[0]->buffsize == 512)
                csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
        else if (rx_ring->fbr[0]->buffsize == 1024)
                csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
        writel(csr, &adapter->regs->rxdma.csr);

        csr = readl(&adapter->regs->rxdma.csr);
        if (csr & ET_RXDMA_CSR_HALT_STATUS) {
                udelay(5);
                csr = readl(&adapter->regs->rxdma.csr);
                if (csr & ET_RXDMA_CSR_HALT_STATUS) {
                        dev_err(&adapter->pdev->dev,
                                "RX Dma failed to exit halt state. CSR 0x%08x\n",
                                csr);
                }
        }
}

static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
        u32 csr;

        /* Setup the receive dma configuration register */
        writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
               &adapter->regs->rxdma.csr);
        csr = readl(&adapter->regs->rxdma.csr);
        if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
                udelay(5);
                csr = readl(&adapter->regs->rxdma.csr);
                if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
                        dev_err(&adapter->pdev->dev,
                                "RX Dma failed to enter halt state. CSR 0x%08x\n",
                                csr);
        }
}

static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
        /* Setup the transmit dma configuration register for normal
         * operation
         */
        writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
               &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
        *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
        *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
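
/* Illustrative usage (not part of the original driver): the helpers above
 * advance a 10/12-bit ring index while preserving the hardware wrap bit the
 * device keeps alongside the index. Starting at index 1022 on a 1024-entry
 * ring and adding 4 wraps the index to 2; the ET_DMA10_WRAP bit of the
 * stored value is left exactly as the caller set it.
 */
static inline u32 add_10bit_example(void)
{
        u32 idx = 1022;         /* near the end of a 1024-entry ring */

        add_10bit(&idx, 4);     /* wraps: INDEX10(idx) is now 2 */
        return idx;
}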

static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
        struct mac_regs __iomem *macregs = &adapter->regs->mac;
        u32 station1;
        u32 station2;
        u32 ipg;

        /* First we need to reset everything. Write to MAC configuration
         * register 1 to perform reset.
         */
        writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
               ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
               ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
               &macregs->cfg1);

        /* Next, let's configure the MAC Inter-packet gap register */
        ipg = 0x38005860;       /* IPG1 0x38, IPG2 0x58, B2B 0x60 */
        ipg |= 0x50 << 8;       /* ifg enforce 0x50 */
        writel(ipg, &macregs->ipg);

        /* Next, let's configure the MAC Half Duplex register */
        /* BEB trunc 0xA, Ex Defer, Rexmit 0xF, Coll 0x37 */
        writel(0x00A1F037, &macregs->hfdp);

        /* Next, let's configure the MAC Interface Control register */
        writel(0, &macregs->if_ctrl);

        writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

        /* Next, let's configure the MAC Station Address registers. These
         * values are read from the EEPROM during initialization and stored
         * in the adapter structure. We write what is stored in the adapter
         * structure to the MAC Station Address registers high and low. This
         * station address is used for generating and checking pause control
         * packets.
         */
        station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
                   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
        station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
                   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
                   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
                    adapter->addr[2];
        writel(station1, &macregs->station_addr_1);
        writel(station2, &macregs->station_addr_2);

        /* Max ethernet packet in bytes that will be passed by the mac without
         * being truncated. Allow the MAC to pass 4 more than our max packet
         * size. This is 4 for the Ethernet CRC.
         *
         * Packets larger than (registry_jumbo_packet) that do not contain a
         * VLAN ID will be dropped by the Rx function.
         */
        writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

        /* clear out MAC config reset */
        writel(0, &macregs->cfg1);
}

static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
        int delay = 0;
        struct mac_regs __iomem *mac = &adapter->regs->mac;
        struct phy_device *phydev = adapter->phydev;
        u32 cfg1;
        u32 cfg2;
        u32 ifctrl;
        u32 ctl;

        ctl = readl(&adapter->regs->txmac.ctl);
        cfg1 = readl(&mac->cfg1);
        cfg2 = readl(&mac->cfg2);
        ifctrl = readl(&mac->if_ctrl);

        /* Set up the if mode bits */
        cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
        if (phydev->speed == SPEED_1000) {
                cfg2 |= ET_MAC_CFG2_IFMODE_1000;
                ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
        } else {
                cfg2 |= ET_MAC_CFG2_IFMODE_100;
                ifctrl |= ET_MAC_IFCTRL_PHYMODE;
        }

        cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
                ET_MAC_CFG1_TX_FLOW;

        cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
        if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
                cfg1 |= ET_MAC_CFG1_RX_FLOW;
        writel(cfg1, &mac->cfg1);

        /* Now we need to initialize the MAC Configuration 2 register */
        /* preamble 7, check length, huge frame off, pad crc, crc enable,
         * full duplex off
         */
        cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
        cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
        cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
        cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
        cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
        cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

        if (phydev->duplex == DUPLEX_FULL)
                cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

        ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
        if (phydev->duplex == DUPLEX_HALF)
                ifctrl |= ET_MAC_IFCTRL_GHDMODE;

        writel(ifctrl, &mac->if_ctrl);
        writel(cfg2, &mac->cfg2);

        do {
                udelay(10);
                delay++;
                cfg1 = readl(&mac->cfg1);
        } while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

        if (delay == 100) {
                dev_warn(&adapter->pdev->dev,
                         "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
                         cfg1);
        }

        ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
        writel(ctl, &adapter->regs->txmac.ctl);

        if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
                et131x_rx_dma_enable(adapter);
                et131x_tx_dma_enable(adapter);
        }
}

static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
        u32 pmcsr = readl(&adapter->regs->global.pm_csr);

        return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
        struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
        u32 hash1 = 0;
        u32 hash2 = 0;
        u32 hash3 = 0;
        u32 hash4 = 0;
        u32 pm_csr;

        /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
         * the multicast list. If it is NOT specified (and "ALL" is not
         * specified either), then we should pass NO multicast addresses to
         * the driver.
         */
        if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
                int i;

                /* Loop through our multicast array and set up the device */
                for (i = 0; i < adapter->multicast_addr_count; i++) {
                        u32 result;

                        result = ether_crc(6, adapter->multicast_list[i]);

                        result = (result & 0x3F800000) >> 23;

                        if (result < 32) {
                                hash1 |= (1 << result);
                        } else if ((31 < result) && (result < 64)) {
                                result -= 32;
                                hash2 |= (1 << result);
                        } else if ((63 < result) && (result < 96)) {
                                result -= 64;
                                hash3 |= (1 << result);
                        } else {
                                result -= 96;
                                hash4 |= (1 << result);
                        }
                }
        }

        /* Write out the new hash to the device */
        pm_csr = readl(&adapter->regs->global.pm_csr);
        if (!et1310_in_phy_coma(adapter)) {
                writel(hash1, &rxmac->multi_hash1);
                writel(hash2, &rxmac->multi_hash2);
                writel(hash3, &rxmac->multi_hash3);
                writel(hash4, &rxmac->multi_hash4);
        }
}

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
        struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
        u32 uni_pf1;
        u32 uni_pf2;
        u32 uni_pf3;
        u32 pm_csr;

        /* Set up unicast packet filter reg 3 to be the first two octets of
         * the MAC address for both addresses
         *
         * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
         * MAC address for the second address
         *
         * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
         * MAC address for the first address
         */
        uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
                  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
                  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
                   adapter->addr[1];

        uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
                  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
                  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
                   adapter->addr[5];

        uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
                  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
                  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
                   adapter->addr[5];

        pm_csr = readl(&adapter->regs->global.pm_csr);
        if (!et1310_in_phy_coma(adapter)) {
                writel(uni_pf1, &rxmac->uni_pf_addr1);
                writel(uni_pf2, &rxmac->uni_pf_addr2);
                writel(uni_pf3, &rxmac->uni_pf_addr3);
        }
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
        struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
        struct phy_device *phydev = adapter->phydev;
        u32 sa_lo;
        u32 sa_hi = 0;
        u32 pf_ctrl = 0;
        u32 __iomem *wolw;

        /* Disable the MAC while it is being configured (also disable WOL) */
        writel(0x8, &rxmac->ctrl);

        /* Initialize WOL to disabled. */
        writel(0, &rxmac->crc0);
        writel(0, &rxmac->crc12);
        writel(0, &rxmac->crc34);

        /* We need to set the WOL mask0 - mask4 next. We initialize them to
         * their default value of 0x00000000 because there are no WOL masks
         * as of this time.
         */
        for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
                writel(0, wolw);

        /* Let's set up the WOL Source Address */
        sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
                (adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
                (adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
                 adapter->addr[5];
        writel(sa_lo, &rxmac->sa_lo);

        sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
                      adapter->addr[1];
        writel(sa_hi, &rxmac->sa_hi);

        /* Disable all Packet Filtering */
        writel(0, &rxmac->pf_ctrl);

        /* Let's initialize the Unicast Packet filtering address */
        if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
                et1310_setup_device_for_unicast(adapter);
                pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
        } else {
                writel(0, &rxmac->uni_pf_addr1);
                writel(0, &rxmac->uni_pf_addr2);
                writel(0, &rxmac->uni_pf_addr3);
        }

        /* Let's initialize the Multicast hash */
        if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
                pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
                et1310_setup_device_for_multicast(adapter);
        }

        /* Runt packet filtering. Didn't work in version A silicon. */
        pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
        pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

        if (adapter->registry_jumbo_packet > 8192)
                /* In order to transmit jumbo packets greater than 8k, the
                 * FIFO between RxMAC and RxDMA needs to be reduced in size
                 * to (16k - Jumbo packet size). In order to implement this,
                 * we must use "cut through" mode in the RxMAC, which chops
                 * packets down into segments which are (max_size * 16). In
                 * this case we selected 256 bytes, since this is the size of
                 * the PCI-Express TLPs that the 1310 uses.
                 *
                 * seg_en on, fc_en off, size 0x10
                 */
                writel(0x41, &rxmac->mcif_ctrl_max_seg);
        else
                writel(0, &rxmac->mcif_ctrl_max_seg);

        writel(0, &rxmac->mcif_water_mark);
        writel(0, &rxmac->mif_ctrl);
        writel(0, &rxmac->space_avail);

        /* Initialize the mif_ctrl register
         * bit 3: Receive code error. One or more nibbles were signaled as
         *        errors during the reception of the packet. Clear this
         *        bit in Gigabit, set it in 100Mbit. This was derived
         *        experimentally at UNH.
         * bit 4: Receive CRC error. The packet's CRC did not match the
         *        internally generated CRC.
         * bit 5: Receive length check error. Indicates that frame length
         *        field value in the packet does not match the actual data
         *        byte length and is not a type field.
         * bit 16: Receive frame truncated.
         * bit 17: Drop packet enable
         */
        if (phydev && phydev->speed == SPEED_100)
                writel(0x30038, &rxmac->mif_ctrl);
        else
                writel(0x30030, &rxmac->mif_ctrl);

        /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
         * filter is always enabled since it is where the runt packets are
         * supposed to be dropped. For version A silicon, runt packet
         * dropping doesn't work, so it is disabled in the pf_ctrl register,
         * but we still leave the packet filter on.
         */
        writel(pf_ctrl, &rxmac->pf_ctrl);
        writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
        struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

        /* We need to update the Control Frame Parameters
         * cfpt - control frame pause timer set to 64 (0x40)
         * cfep - control frame extended pause timer set to 0x0
         */
        if (adapter->flow == FLOW_NONE)
                writel(0, &txmac->cf_param);
        else
                writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
        struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
        u32 __iomem *reg;

        /* initialize all the macstat registers to zero on the device */
        for (reg = &macstat->txrx_0_64_byte_frames;
             reg <= &macstat->carry_reg2; reg++)
                writel(0, reg);

        /* Unmask any counters that we want to track the overflow of.
         * Initially this will be all counters. It may become clear later
         * that we do not need to track all counters.
         */
        writel(0xFFFFBE32, &macstat->carry_reg1_mask);
        writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
                               u8 reg, u16 *value)
{
        struct mac_regs __iomem *mac = &adapter->regs->mac;
        int status = 0;
        u32 delay = 0;
        u32 mii_addr;
        u32 mii_cmd;
        u32 mii_indicator;

        /* Save a local copy of the registers we are dealing with so we can
         * set them back
         */
        mii_addr = readl(&mac->mii_mgmt_addr);
        mii_cmd = readl(&mac->mii_mgmt_cmd);

        /* Stop the current operation */
        writel(0, &mac->mii_mgmt_cmd);

        /* Set up the register we need to read from on the correct PHY */
        writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

        writel(0x1, &mac->mii_mgmt_cmd);

        do {
                udelay(50);
                delay++;
                mii_indicator = readl(&mac->mii_mgmt_indicator);
        } while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

        /* If we hit the max delay, we could not read the register */
        if (delay == 50) {
                dev_warn(&adapter->pdev->dev,
                         "reg 0x%08x could not be read\n", reg);
                dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
                         mii_indicator);

                status = -EIO;
                goto out;
        }

        /* If we hit here we were able to read the register and we need to
         * return the value to the caller
         */
        *value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
        /* Stop the read operation */
        writel(0, &mac->mii_mgmt_cmd);

        /* set the registers we touched back to the state at which we entered
         * this function
         */
        writel(mii_addr, &mac->mii_mgmt_addr);
        writel(mii_cmd, &mac->mii_mgmt_cmd);

        return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
        struct phy_device *phydev = adapter->phydev;

        if (!phydev)
                return -EIO;

        return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value);
}

static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
                            u16 value)
{
        struct mac_regs __iomem *mac = &adapter->regs->mac;
        int status = 0;
        u32 delay = 0;
        u32 mii_addr;
        u32 mii_cmd;
        u32 mii_indicator;

        /* Save a local copy of the registers we are dealing with so we can
         * set them back
         */
        mii_addr = readl(&mac->mii_mgmt_addr);
        mii_cmd = readl(&mac->mii_mgmt_cmd);

        /* Stop the current operation */
        writel(0, &mac->mii_mgmt_cmd);

        /* Set up the register we need to write to on the correct PHY */
        writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

        /* Add the value to write to the registers to the mac */
        writel(value, &mac->mii_mgmt_ctrl);

        do {
                udelay(50);
                delay++;
                mii_indicator = readl(&mac->mii_mgmt_indicator);
        } while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

        /* If we hit the max delay, we could not write the register */
        if (delay == 100) {
                u16 tmp;

                dev_warn(&adapter->pdev->dev,
                         "reg 0x%08x could not be written\n", reg);
                dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
                         mii_indicator);
                dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
                         readl(&mac->mii_mgmt_cmd));

                et131x_mii_read(adapter, reg, &tmp);

                status = -EIO;
        }
        /* Stop the write operation */
        writel(0, &mac->mii_mgmt_cmd);

        /* set the registers we touched back to the state at which we entered
         * this function
         */
        writel(mii_addr, &mac->mii_mgmt_addr);
        writel(mii_cmd, &mac->mii_mgmt_cmd);

        return status;
}

static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
                                    u16 regnum,
                                    u16 bitnum,
                                    u8 *value)
{
        u16 reg;
        u16 mask = 1 << bitnum;

        et131x_mii_read(adapter, regnum, &reg);

        *value = (reg & mask) >> bitnum;
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
        struct phy_device *phydev = adapter->phydev;

        if (phydev->duplex == DUPLEX_HALF) {
                adapter->flow = FLOW_NONE;
        } else {
                u8 remote_pause, remote_async_pause;

                et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
                et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);

                if (remote_pause && remote_async_pause) {
                        adapter->flow = adapter->wanted_flow;
                } else if (remote_pause && !remote_async_pause) {
                        if (adapter->wanted_flow == FLOW_BOTH)
                                adapter->flow = FLOW_BOTH;
                        else
                                adapter->flow = FLOW_NONE;
                } else if (!remote_pause && !remote_async_pause) {
                        adapter->flow = FLOW_NONE;
                } else {
                        if (adapter->wanted_flow == FLOW_BOTH)
                                adapter->flow = FLOW_RXONLY;
                        else
                                adapter->flow = FLOW_NONE;
                }
        }
}

/* et1310_update_macstat_host_counters - Update local copy of the statistics */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
        struct ce_stats *stats = &adapter->stats;
        struct macstat_regs __iomem *macstat = &adapter->regs->macstat;

        stats->tx_collisions += readl(&macstat->tx_total_collisions);
        stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
        stats->tx_deferred += readl(&macstat->tx_deferred);
        stats->tx_excessive_collisions +=
                readl(&macstat->tx_multiple_collisions);
        stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
        stats->tx_underflows += readl(&macstat->tx_undersize_frames);
        stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

        stats->rx_align_errs += readl(&macstat->rx_align_errs);
        stats->rx_crc_errs += readl(&macstat->rx_code_errs);
        stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
        stats->rx_overflows += readl(&macstat->rx_oversize_packets);
        stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
        stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
        stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

/* et1310_handle_macstat_interrupt
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure, checking the "wrap"
 * bit for each counter.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
        u32 carry_reg1;
        u32 carry_reg2;

        /* Read the interrupt bits from the register(s). These are Clear On
         * Write.
         */
        carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
        carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

        writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
        writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

        /* We need to update the host copy of all the MAC_STAT counters.
         * For each counter, check its overflow bit. If the overflow bit is
         * set, then increment the host version of the count by one complete
         * revolution of the counter. This routine is called when the counter
         * block indicates that one of the counters has wrapped.
         */
        if (carry_reg1 & (1 << 14))
                adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 8))
                adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
        if (carry_reg1 & (1 << 7))
                adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 2))
                adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 6))
                adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 3))
                adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
        if (carry_reg1 & (1 << 0))
                adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
        if (carry_reg2 & (1 << 16))
                adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 15))
                adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 6))
                adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 8))
                adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 5))
                adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 4))
                adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
        if (carry_reg2 & (1 << 2))
                adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
        struct net_device *netdev = bus->priv;
        struct et131x_adapter *adapter = netdev_priv(netdev);
        u16 value;
        int ret;

        ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
        if (ret < 0)
                return ret;

        return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
                             int reg, u16 value)
{
        struct net_device *netdev = bus->priv;
        struct et131x_adapter *adapter = netdev_priv(netdev);

        return et131x_mii_write(adapter, phy_addr, reg, value);
}

/* et1310_phy_power_switch - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
        u16 data;
        struct phy_device *phydev = adapter->phydev;

        et131x_mii_read(adapter, MII_BMCR, &data);
        data &= ~BMCR_PDOWN;
        if (down)
                data |= BMCR_PDOWN;
        et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data);
}
| 1467 | |
| 1468 | /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ |
| 1469 | static void et131x_xcvr_init(struct et131x_adapter *adapter) |
| 1470 | { |
| 1471 | u16 lcr2; |
| 1472 | struct phy_device *phydev = adapter->phydev; |
| 1473 | |
| 1474 | /* Set the LED behavior such that LED 1 indicates speed (off = |
| 1475 | * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates |
| 1476 | * link and activity (on for link, blink off for activity). |
| 1477 | * |
| 1478 | * NOTE: Some customizations have been added here for specific |
| 1479 | * vendors; The LED behavior is now determined by vendor data in the |
| 1480 | * EEPROM. However, the above description is the default. |
| 1481 | */ |
| 1482 | if ((adapter->eeprom_data[1] & 0x4) == 0) { |
| 1483 | et131x_mii_read(adapter, PHY_LED_2, &lcr2); |
| 1484 | |
| 1485 | lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T); |
| 1486 | lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); |
| 1487 | |
| 1488 | if ((adapter->eeprom_data[1] & 0x8) == 0) |
| 1489 | lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); |
| 1490 | else |
| 1491 | lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); |
| 1492 | |
Andrew Lunn | e5a03bf | 2016-01-06 20:11:16 +0100 | [diff] [blame] | 1493 | et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2); |
Mark Einon | 38df649 | 2014-09-30 22:29:46 +0100 | [diff] [blame] | 1494 | } |
| 1495 | } |
| 1496 | |
| 1497 | /* et131x_configure_global_regs - configure JAGCore global regs */ |
| 1498 | static void et131x_configure_global_regs(struct et131x_adapter *adapter) |
| 1499 | { |
| 1500 | struct global_regs __iomem *regs = &adapter->regs->global; |
| 1501 | |
| 1502 | writel(0, ®s->rxq_start_addr); |
| 1503 | writel(INTERNAL_MEM_SIZE - 1, ®s->txq_end_addr); |
| 1504 | |
| 1505 | if (adapter->registry_jumbo_packet < 2048) { |
| 1506 | /* The Tx/Rx DMA and Tx/Rx MAC interfaces share a 1k-word
| 1507 |  * block of RAM that the driver can split between Tx
| 1508 |  * and Rx as it desires. Our default is a 50/50
| 1509 |  * split:
| 1510 |  */
| 1511 | writel(PARM_RX_MEM_END_DEF, ®s->rxq_end_addr); |
| 1512 | writel(PARM_RX_MEM_END_DEF + 1, ®s->txq_start_addr); |
| 1513 | } else if (adapter->registry_jumbo_packet < 8192) { |
| 1514 | /* For jumbo packets > 2k but < 8k, split 50-50. */ |
| 1515 | writel(INTERNAL_MEM_RX_OFFSET, ®s->rxq_end_addr); |
| 1516 | writel(INTERNAL_MEM_RX_OFFSET + 1, ®s->txq_start_addr); |
| 1517 | } else { |
| 1518 | /* 9216 is the only supported packet size greater than
| 1519 |  * 8k. The Tx buffer has to be big enough for one whole
| 1520 |  * packet, so we make Tx 9408 bytes and give the rest to
| 1521 |  * Rx (see the worked example after this function).
| 1522 |  */
| 1523 | writel(0x01b3, ®s->rxq_end_addr); |
| 1524 | writel(0x01b4, ®s->txq_start_addr); |
| 1525 | } |
| 1526 | |
| 1527 | /* Initialize the loopback register. Disable all loopbacks. */ |
| 1528 | writel(0, ®s->loopback); |
| 1529 | |
| 1530 | writel(0, ®s->msi_config); |
| 1531 | |
| 1532 | /* By default, disable the watchdog timer. It will be enabled when |
| 1533 | * a packet is queued. |
| 1534 | */ |
| 1535 | writel(0, ®s->watchdog_timer); |
| 1536 | } |
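/* Worked example of the internal-RAM split above, assuming 16-byte queue
 * entries and an INTERNAL_MEM_SIZE of 0x400 (1024 entries, 16 KB total):
 *
 *	standard frames: rxq gets entries 0x000..PARM_RX_MEM_END_DEF and
 *	                 txq the remainder (a 50/50 split);
 *	9216-byte jumbo: rxq_end = 0x01b3, so Rx owns 436 entries
 *	                 (6976 bytes) and Tx owns 0x01b4..0x03ff, i.e.
 *	                 588 entries * 16 = 9408 bytes - enough for one
 *	                 whole 9216-byte packet.
 */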
| 1537 | |
| 1538 | /* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence */ |
| 1539 | static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) |
| 1540 | { |
| 1541 | struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; |
| 1542 | struct rx_ring *rx_local = &adapter->rx_ring; |
| 1543 | struct fbr_desc *fbr_entry; |
| 1544 | u32 entry; |
| 1545 | u32 psr_num_des; |
| 1546 | unsigned long flags; |
| 1547 | u8 id; |
| 1548 | |
| 1549 | et131x_rx_dma_disable(adapter); |
| 1550 | |
| 1551 | /* Load the completion writeback physical address */ |
| 1552 | writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi); |
| 1553 | writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo); |
| 1554 | |
| 1555 | memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); |
| 1556 | |
| 1557 | /* Set the address and parameters of the packet status ring */ |
| 1558 | writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi); |
| 1559 | writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo); |
| 1560 | writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des); |
| 1561 | writel(0, &rx_dma->psr_full_offset); |
| 1562 | |
| 1563 | psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK; |
| 1564 | writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, |
| 1565 | &rx_dma->psr_min_des); |
| 1566 | |
| 1567 | spin_lock_irqsave(&adapter->rcv_lock, flags); |
| 1568 | |
| 1569 | /* These local variables track the PSR in the adapter structure */ |
| 1570 | rx_local->local_psr_full = 0; |
| 1571 | |
| 1572 | for (id = 0; id < NUM_FBRS; id++) { |
| 1573 | u32 __iomem *num_des; |
| 1574 | u32 __iomem *full_offset; |
| 1575 | u32 __iomem *min_des; |
| 1576 | u32 __iomem *base_hi; |
| 1577 | u32 __iomem *base_lo; |
| 1578 | struct fbr_lookup *fbr = rx_local->fbr[id]; |
| 1579 | |
| 1580 | if (id == 0) { |
| 1581 | num_des = &rx_dma->fbr0_num_des; |
| 1582 | full_offset = &rx_dma->fbr0_full_offset; |
| 1583 | min_des = &rx_dma->fbr0_min_des; |
| 1584 | base_hi = &rx_dma->fbr0_base_hi; |
| 1585 | base_lo = &rx_dma->fbr0_base_lo; |
| 1586 | } else { |
| 1587 | num_des = &rx_dma->fbr1_num_des; |
| 1588 | full_offset = &rx_dma->fbr1_full_offset; |
| 1589 | min_des = &rx_dma->fbr1_min_des; |
| 1590 | base_hi = &rx_dma->fbr1_base_hi; |
| 1591 | base_lo = &rx_dma->fbr1_base_lo; |
| 1592 | } |
| 1593 | |
| 1594 | /* Now's the best time to initialize FBR contents */ |
| 1595 | fbr_entry = fbr->ring_virtaddr; |
| 1596 | for (entry = 0; entry < fbr->num_entries; entry++) { |
| 1597 | fbr_entry->addr_hi = fbr->bus_high[entry]; |
| 1598 | fbr_entry->addr_lo = fbr->bus_low[entry]; |
| 1599 | fbr_entry->word2 = entry; |
| 1600 | fbr_entry++; |
| 1601 | } |
| 1602 | |
| 1603 | /* Set the address and parameters of Free buffer ring 1 and 0 */ |
| 1604 | writel(upper_32_bits(fbr->ring_physaddr), base_hi); |
| 1605 | writel(lower_32_bits(fbr->ring_physaddr), base_lo); |
| 1606 | writel(fbr->num_entries - 1, num_des); |
| 1607 | writel(ET_DMA10_WRAP, full_offset); |
| 1608 | |
| 1609 | /* This variable tracks this free buffer ring's full position,
| 1610 |  * so it has to match the offset written just above.
| 1611 |  */
| 1612 | fbr->local_full = ET_DMA10_WRAP; |
| 1613 | writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, |
| 1614 | min_des); |
| 1615 | } |
| 1616 | |
| 1617 | /* Program the number of packets we will receive before generating an
| 1618 |  * interrupt.
| 1619 |  * For version B silicon, this value gets updated once autoneg is
| 1620 |  * complete.
| 1621 |  */
| 1622 | writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); |
| 1623 | |
| 1624 | /* The "time_done" mechanism does not coalesce interrupts over a
| 1625 |  * given time period as intended; instead it raises an interrupt
| 1626 |  * regardless of whether we have received packets.
| 1627 |  * This value gets updated once autoneg is complete.
| 1628 |  */
| 1629 | writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); |
| 1630 | |
| 1631 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 1632 | } |
| 1633 | |
| 1634 | /* et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. |
| 1635 | * |
| 1636 | * Configure the transmit engine with the ring buffers we have created |
| 1637 | * and prepare it for use. |
| 1638 | */ |
| 1639 | static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) |
| 1640 | { |
| 1641 | struct txdma_regs __iomem *txdma = &adapter->regs->txdma; |
| 1642 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 1643 | |
| 1644 | /* Load the hardware with the start of the transmit descriptor ring. */ |
| 1645 | writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi); |
| 1646 | writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo); |
| 1647 | |
| 1648 | /* Initialise the transmit DMA engine */ |
| 1649 | writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); |
| 1650 | |
| 1651 | /* Load the completion writeback physical address */ |
| 1652 | writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi); |
| 1653 | writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo); |
| 1654 | |
| 1655 | *tx_ring->tx_status = 0; |
| 1656 | |
| 1657 | writel(0, &txdma->service_request); |
| 1658 | tx_ring->send_idx = 0; |
| 1659 | } |
| 1660 | |
| 1661 | /* et131x_adapter_setup - Set the adapter up as per cassini+ documentation */ |
| 1662 | static void et131x_adapter_setup(struct et131x_adapter *adapter) |
| 1663 | { |
| 1664 | et131x_configure_global_regs(adapter); |
| 1665 | et1310_config_mac_regs1(adapter); |
| 1666 | |
| 1667 | /* Configure the MMC registers */ |
| 1668 | /* All we need to do is initialize the Memory Control Register */ |
| 1669 | writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); |
| 1670 | |
| 1671 | et1310_config_rxmac_regs(adapter); |
| 1672 | et1310_config_txmac_regs(adapter); |
| 1673 | |
| 1674 | et131x_config_rx_dma_regs(adapter); |
| 1675 | et131x_config_tx_dma_regs(adapter); |
| 1676 | |
| 1677 | et1310_config_macstat_regs(adapter); |
| 1678 | |
| 1679 | et1310_phy_power_switch(adapter, false);
| 1680 | et131x_xcvr_init(adapter); |
| 1681 | } |
| 1682 | |
| 1683 | /* et131x_soft_reset - Issue soft reset to the hardware, complete for ET1310 */ |
| 1684 | static void et131x_soft_reset(struct et131x_adapter *adapter) |
| 1685 | { |
| 1686 | u32 reg; |
| 1687 | |
| 1688 | /* Disable MAC Core */ |
| 1689 | reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET | |
| 1690 | ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | |
| 1691 | ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; |
| 1692 | writel(reg, &adapter->regs->mac.cfg1); |
| 1693 | |
| 1694 | reg = ET_RESET_ALL; |
| 1695 | writel(reg, &adapter->regs->global.sw_reset); |
| 1696 | |
| 1697 | reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC | |
| 1698 | ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC; |
| 1699 | writel(reg, &adapter->regs->mac.cfg1); |
| 1700 | writel(0, &adapter->regs->mac.cfg1); |
| 1701 | } |
| 1702 | |
| 1703 | static void et131x_enable_interrupts(struct et131x_adapter *adapter) |
| 1704 | { |
| 1705 | u32 mask; |
| 1706 | |
| 1707 | if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) |
| 1708 | mask = INT_MASK_ENABLE; |
| 1709 | else |
| 1710 | mask = INT_MASK_ENABLE_NO_FLOW; |
| 1711 | |
| 1712 | writel(mask, &adapter->regs->global.int_mask); |
| 1713 | } |
| 1714 | |
| 1715 | static void et131x_disable_interrupts(struct et131x_adapter *adapter) |
| 1716 | { |
| 1717 | writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); |
| 1718 | } |
| 1719 | |
| 1720 | static void et131x_tx_dma_disable(struct et131x_adapter *adapter) |
| 1721 | { |
| 1722 | /* Setup the transmit dma configuration register */ |
| 1723 | writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT, |
| 1724 | &adapter->regs->txdma.csr); |
| 1725 | } |
| 1726 | |
| 1727 | static void et131x_enable_txrx(struct net_device *netdev) |
| 1728 | { |
| 1729 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 1730 | |
| 1731 | et131x_rx_dma_enable(adapter); |
| 1732 | et131x_tx_dma_enable(adapter); |
| 1733 | |
| 1734 | if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE) |
| 1735 | et131x_enable_interrupts(adapter); |
| 1736 | |
| 1737 | netif_start_queue(netdev); |
| 1738 | } |
| 1739 | |
| 1740 | static void et131x_disable_txrx(struct net_device *netdev) |
| 1741 | { |
| 1742 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 1743 | |
| 1744 | netif_stop_queue(netdev); |
| 1745 | |
| 1746 | et131x_rx_dma_disable(adapter); |
| 1747 | et131x_tx_dma_disable(adapter); |
| 1748 | |
| 1749 | et131x_disable_interrupts(adapter); |
| 1750 | } |
| 1751 | |
| 1752 | static void et131x_init_send(struct et131x_adapter *adapter) |
| 1753 | { |
| 1754 | int i; |
| 1755 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 1756 | struct tcb *tcb = tx_ring->tcb_ring; |
| 1757 | |
| 1758 | tx_ring->tcb_qhead = tcb; |
| 1759 | |
| 1760 | memset(tcb, 0, sizeof(struct tcb) * NUM_TCB); |
| 1761 | |
| 1762 | for (i = 0; i < NUM_TCB; i++) { |
| 1763 | tcb->next = tcb + 1; |
| 1764 | tcb++; |
| 1765 | } |
| 1766 | |
| 1767 | tcb--; |
| 1768 | tx_ring->tcb_qtail = tcb; |
| 1769 | tcb->next = NULL; |
| 1770 | /* Curr send queue should now be empty */ |
| 1771 | tx_ring->send_head = NULL; |
| 1772 | tx_ring->send_tail = NULL; |
| 1773 | } |
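/* After et131x_init_send() the TCB free list is a singly linked chain:
 *
 *	tcb_qhead -> tcb[0] -> tcb[1] -> ... -> tcb[NUM_TCB - 1] -> NULL
 *	                                            ^tcb_qtail
 *
 * send_packet() below pops TCBs from tcb_qhead, and free_send_packet()
 * pushes recycled ones back at tcb_qtail.
 */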
| 1774 | |
| 1775 | /* et1310_enable_phy_coma
| 1776 |  *
| 1777 |  * Called when the driver receives a PHY status change interrupt while
| 1778 |  * in D0 and finds that phy_status is down. The sequence is:
| 1779 |  *
| 1780 |  * -- gate off the JAGCore;
| 1781 |  * -- put the gigE PHY into Coma mode;
| 1782 |  * -- on the next phy_interrupt, wake up: perform a software reset of
| 1783 |  *    the JAGCore, then re-initialize the JAGCore and gigE PHY
| 1784 |  */
| 1785 | static void et1310_enable_phy_coma(struct et131x_adapter *adapter) |
| 1786 | { |
| 1787 | u32 pmcsr = readl(&adapter->regs->global.pm_csr); |
| 1788 | |
| 1789 | /* Stop sending packets. */ |
| 1790 | adapter->flags |= FMP_ADAPTER_LOWER_POWER; |
| 1791 | |
| 1792 | /* Wait for outstanding Receive packets */ |
| 1793 | et131x_disable_txrx(adapter->netdev); |
| 1794 | |
| 1795 | /* Gate off JAGCore 3 clock domains */ |
| 1796 | pmcsr &= ~ET_PMCSR_INIT; |
| 1797 | writel(pmcsr, &adapter->regs->global.pm_csr); |
| 1798 | |
| 1799 | /* Program gigE PHY in to Coma mode */ |
| 1800 | pmcsr |= ET_PM_PHY_SW_COMA; |
| 1801 | writel(pmcsr, &adapter->regs->global.pm_csr); |
| 1802 | } |
| 1803 | |
| 1804 | static void et1310_disable_phy_coma(struct et131x_adapter *adapter) |
| 1805 | { |
| 1806 | u32 pmcsr; |
| 1807 | |
| 1808 | pmcsr = readl(&adapter->regs->global.pm_csr); |
| 1809 | |
| 1810 | /* Disable phy_sw_coma register and re-enable JAGCore clocks */ |
| 1811 | pmcsr |= ET_PMCSR_INIT; |
| 1812 | pmcsr &= ~ET_PM_PHY_SW_COMA; |
| 1813 | writel(pmcsr, &adapter->regs->global.pm_csr); |
| 1814 | |
| 1815 | /* Restore the GbE PHY speed and duplex modes; |
| 1816 | * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY |
| 1817 | */ |
| 1818 | |
| 1819 | /* Re-initialize the send structures */ |
| 1820 | et131x_init_send(adapter); |
| 1821 | |
| 1822 | /* Bring the device back to the state it was during init prior to |
| 1823 | * autonegotiation being complete. This way, when we get the auto-neg |
| 1824 | * complete interrupt, we can complete init by calling ConfigMacREGS2. |
| 1825 | */ |
| 1826 | et131x_soft_reset(adapter); |
| 1827 | |
| 1828 | et131x_adapter_setup(adapter); |
| 1829 | |
| 1830 | /* Allow Tx to restart */ |
| 1831 | adapter->flags &= ~FMP_ADAPTER_LOWER_POWER; |
| 1832 | |
| 1833 | et131x_enable_txrx(adapter->netdev); |
| 1834 | } |
| 1835 | |
| 1836 | static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) |
| 1837 | { |
| 1838 | u32 tmp_free_buff_ring = *free_buff_ring; |
| 1839 | |
| 1840 | tmp_free_buff_ring++; |
| 1841 | /* This works for all cases where limit < 1024. The limit == 1023
| 1842 |  * case works because incrementing 1023 yields 1024: the if condition
| 1843 |  * is not taken, but the carry out of the 10-bit index into the wrap
| 1844 |  * bit toggles the wrap value correctly.
| 1845 |  */
| 1846 | if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { |
| 1847 | tmp_free_buff_ring &= ~ET_DMA10_MASK; |
| 1848 | tmp_free_buff_ring ^= ET_DMA10_WRAP; |
| 1849 | } |
| 1850 | /* For the 1023 case */ |
| 1851 | tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP); |
| 1852 | *free_buff_ring = tmp_free_buff_ring; |
| 1853 | return tmp_free_buff_ring; |
| 1854 | } |
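/* Worked example, assuming ET_DMA10_MASK = 0x3ff and ET_DMA10_WRAP =
 * 0x400 (consistent with the 10-bit ring index used throughout):
 *
 *	limit = 511, ring = 0x1ff (index 511, wrap 0):
 *	  increment -> 0x200; 512 > 511, so the index clears and the
 *	  wrap bit toggles -> 0x400 (index 0, wrap 1)
 *
 *	limit = 1023, ring = 0x7ff (index 1023, wrap 1):
 *	  increment -> 0x800; the carry zeroes the index and wrap bit,
 *	  and the final mask drops bit 11 -> 0x000 (index 0, wrap 0)
 */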
| 1855 | |
| 1856 | /* et131x_rx_dma_memory_alloc |
| 1857 | * |
| 1858 | * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, |
| 1859 | * and the Packet Status Ring. |
| 1860 | */ |
| 1861 | static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) |
| 1862 | { |
| 1863 | u8 id; |
| 1864 | u32 i, j; |
| 1865 | u32 bufsize; |
| 1866 | u32 psr_size; |
| 1867 | u32 fbr_chunksize; |
| 1868 | struct rx_ring *rx_ring = &adapter->rx_ring; |
| 1869 | struct fbr_lookup *fbr; |
| 1870 | |
| 1871 | /* Alloc memory for the lookup table */ |
| 1872 | rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL); |
| 1873 | if (rx_ring->fbr[0] == NULL) |
| 1874 | return -ENOMEM; |
| 1875 | rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL); |
| 1876 | if (rx_ring->fbr[1] == NULL) |
| 1877 | return -ENOMEM; |
| 1878 | |
| 1879 | /* The first thing we will do is configure the sizes of the buffer
| 1880 |  * rings. These will change based on jumbo packet support. Larger
| 1881 |  * jumbo packets increase the size of each entry in FBR0, and the
| 1882 |  * number of entries in FBR0, while at the same time decreasing the
| 1883 |  * number of entries in FBR1.
| 1884 |  *
| 1885 |  * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
| 1886 |  * entries are huge in order to accommodate a "jumbo" frame, then it
| 1887 |  * will have fewer entries. Conversely, FBR0 is then relied upon
| 1888 |  * to carry more "normal" frames, so its entry size also increases
| 1889 |  * and the number of entries goes up too (since it now carries
| 1890 |  * "small" + "regular" packets).
| 1891 |  *
| 1892 |  * In this scheme, we try to maintain 512 entries between the two
| 1893 |  * rings. Also, FBR1's total footprint stays constant - when its entry
| 1894 |  * size doubles, the number of entries halves. FBR0 grows, however.
| 1895 |  */
| 1896 | if (adapter->registry_jumbo_packet < 2048) { |
| 1897 | rx_ring->fbr[0]->buffsize = 256; |
| 1898 | rx_ring->fbr[0]->num_entries = 512; |
| 1899 | rx_ring->fbr[1]->buffsize = 2048; |
| 1900 | rx_ring->fbr[1]->num_entries = 512; |
| 1901 | } else if (adapter->registry_jumbo_packet < 4096) { |
| 1902 | rx_ring->fbr[0]->buffsize = 512; |
| 1903 | rx_ring->fbr[0]->num_entries = 1024; |
| 1904 | rx_ring->fbr[1]->buffsize = 4096; |
| 1905 | rx_ring->fbr[1]->num_entries = 512; |
| 1906 | } else { |
| 1907 | rx_ring->fbr[0]->buffsize = 1024; |
| 1908 | rx_ring->fbr[0]->num_entries = 768; |
| 1909 | rx_ring->fbr[1]->buffsize = 16384; |
| 1910 | rx_ring->fbr[1]->num_entries = 128; |
| 1911 | } |
| 1912 | |
| 1913 | rx_ring->psr_entries = rx_ring->fbr[0]->num_entries + |
| 1914 | rx_ring->fbr[1]->num_entries; |
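/* The resulting ring geometry, derived from the table above:
 *
 *	jumbo < 2048: FBR0  512 x  256 B, FBR1 512 x  2048 B, PSR 1024
 *	jumbo < 4096: FBR0 1024 x  512 B, FBR1 512 x  4096 B, PSR 1536
 *	otherwise:    FBR0  768 x 1024 B, FBR1 128 x 16384 B, PSR  896
 */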
| 1915 | |
| 1916 | for (id = 0; id < NUM_FBRS; id++) { |
| 1917 | fbr = rx_ring->fbr[id]; |
| 1918 | /* Allocate an area of memory for Free Buffer Ring */ |
| 1919 | bufsize = sizeof(struct fbr_desc) * fbr->num_entries; |
| 1920 | fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, |
| 1921 | bufsize, |
| 1922 | &fbr->ring_physaddr, |
| 1923 | GFP_KERNEL); |
| 1924 | if (!fbr->ring_virtaddr) { |
| 1925 | dev_err(&adapter->pdev->dev, |
| 1926 | "Cannot alloc memory for Free Buffer Ring %d\n", |
| 1927 | id); |
| 1928 | return -ENOMEM; |
| 1929 | } |
| 1930 | } |
| 1931 | |
| 1932 | for (id = 0; id < NUM_FBRS; id++) { |
| 1933 | fbr = rx_ring->fbr[id]; |
| 1934 | fbr_chunksize = (FBR_CHUNKS * fbr->buffsize); |
| 1935 | |
| 1936 | for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) { |
| 1937 | dma_addr_t fbr_physaddr; |
| 1938 | |
| 1939 | fbr->mem_virtaddrs[i] = dma_alloc_coherent( |
| 1940 | &adapter->pdev->dev, fbr_chunksize, |
| 1941 | &fbr->mem_physaddrs[i], |
| 1942 | GFP_KERNEL); |
| 1943 | |
| 1944 | if (!fbr->mem_virtaddrs[i]) { |
| 1945 | dev_err(&adapter->pdev->dev, |
| 1946 | "Could not alloc memory\n"); |
| 1947 | return -ENOMEM; |
| 1948 | } |
| 1949 | |
| 1950 | /* See NOTE in "Save Physical Address" comment above */ |
| 1951 | fbr_physaddr = fbr->mem_physaddrs[i]; |
| 1952 | |
| 1953 | for (j = 0; j < FBR_CHUNKS; j++) { |
| 1954 | u32 k = (i * FBR_CHUNKS) + j; |
| 1955 | |
| 1956 | /* Save the Virtual address of this index for |
| 1957 | * quick access later |
| 1958 | */ |
| 1959 | fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] + |
| 1960 | (j * fbr->buffsize); |
| 1961 | |
| 1962 | /* now store the physical address in the |
| 1963 | * descriptor so the device can access it |
| 1964 | */ |
| 1965 | fbr->bus_high[k] = upper_32_bits(fbr_physaddr); |
| 1966 | fbr->bus_low[k] = lower_32_bits(fbr_physaddr); |
| 1967 | fbr_physaddr += fbr->buffsize; |
| 1968 | } |
| 1969 | } |
| 1970 | } |
| 1971 | |
| 1972 | /* Allocate an area of memory for FIFO of Packet Status ring entries */ |
| 1973 | psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; |
| 1974 | |
| 1975 | rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, |
| 1976 | psr_size, |
| 1977 | &rx_ring->ps_ring_physaddr, |
| 1978 | GFP_KERNEL); |
| 1979 | |
| 1980 | if (!rx_ring->ps_ring_virtaddr) { |
| 1981 | dev_err(&adapter->pdev->dev, |
| 1982 | "Cannot alloc memory for Packet Status Ring\n"); |
| 1983 | return -ENOMEM; |
| 1984 | } |
| 1985 | |
| 1986 | /* Allocate an area of memory for writeback of status information */ |
| 1987 | rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, |
| 1988 | sizeof(struct rx_status_block), |
| 1989 | &rx_ring->rx_status_bus, |
| 1990 | GFP_KERNEL); |
| 1991 | if (!rx_ring->rx_status_block) { |
| 1992 | dev_err(&adapter->pdev->dev, |
| 1993 | "Cannot alloc memory for Status Block\n"); |
| 1994 | return -ENOMEM; |
| 1995 | } |
| 1996 | rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; |
| 1997 | |
| 1998 | /* The RFDs are going to be put on lists later on, so initialize the |
| 1999 | * lists now. |
| 2000 | */ |
| 2001 | INIT_LIST_HEAD(&rx_ring->recv_list); |
| 2002 | return 0; |
| 2003 | } |
| 2004 | |
| 2005 | static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) |
| 2006 | { |
| 2007 | u8 id; |
| 2008 | u32 ii; |
| 2009 | u32 bufsize; |
| 2010 | u32 psr_size; |
| 2011 | struct rfd *rfd; |
| 2012 | struct rx_ring *rx_ring = &adapter->rx_ring; |
| 2013 | struct fbr_lookup *fbr; |
| 2014 | |
| 2015 | /* Free RFDs and associated packet descriptors */ |
| 2016 | WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); |
| 2017 | |
| 2018 | while (!list_empty(&rx_ring->recv_list)) { |
| 2019 | rfd = list_entry(rx_ring->recv_list.next, |
| 2020 | struct rfd, list_node); |
| 2021 | |
| 2022 | list_del(&rfd->list_node); |
| 2023 | rfd->skb = NULL; |
| 2024 | kfree(rfd); |
| 2025 | } |
| 2026 | |
| 2027 | /* Free Free Buffer Rings */ |
| 2028 | for (id = 0; id < NUM_FBRS; id++) { |
| 2029 | fbr = rx_ring->fbr[id]; |
| 2030 | |
| 2031 | if (!fbr || !fbr->ring_virtaddr) |
| 2032 | continue; |
| 2033 | |
| 2034 | /* First the packet memory */ |
| 2035 | for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) { |
| 2036 | if (fbr->mem_virtaddrs[ii]) { |
| 2037 | bufsize = fbr->buffsize * FBR_CHUNKS; |
| 2038 | |
| 2039 | dma_free_coherent(&adapter->pdev->dev, |
| 2040 | bufsize, |
| 2041 | fbr->mem_virtaddrs[ii], |
| 2042 | fbr->mem_physaddrs[ii]); |
| 2043 | |
| 2044 | fbr->mem_virtaddrs[ii] = NULL; |
| 2045 | } |
| 2046 | } |
| 2047 | |
| 2048 | bufsize = sizeof(struct fbr_desc) * fbr->num_entries; |
| 2049 | |
| 2050 | dma_free_coherent(&adapter->pdev->dev, |
| 2051 | bufsize, |
| 2052 | fbr->ring_virtaddr, |
| 2053 | fbr->ring_physaddr); |
| 2054 | |
| 2055 | fbr->ring_virtaddr = NULL; |
| 2056 | } |
| 2057 | |
| 2058 | /* Free Packet Status Ring */ |
| 2059 | if (rx_ring->ps_ring_virtaddr) { |
| 2060 | psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries; |
| 2061 | |
| 2062 | dma_free_coherent(&adapter->pdev->dev, psr_size, |
| 2063 | rx_ring->ps_ring_virtaddr, |
| 2064 | rx_ring->ps_ring_physaddr); |
| 2065 | |
| 2066 | rx_ring->ps_ring_virtaddr = NULL; |
| 2067 | } |
| 2068 | |
| 2069 | /* Free area of memory for the writeback of status information */ |
| 2070 | if (rx_ring->rx_status_block) { |
| 2071 | dma_free_coherent(&adapter->pdev->dev, |
| 2072 | sizeof(struct rx_status_block), |
| 2073 | rx_ring->rx_status_block, |
| 2074 | rx_ring->rx_status_bus); |
| 2075 | rx_ring->rx_status_block = NULL; |
| 2076 | } |
| 2077 | |
| 2078 | /* Free the FBR Lookup Table */ |
| 2079 | kfree(rx_ring->fbr[0]); |
| 2080 | kfree(rx_ring->fbr[1]); |
| 2081 | |
| 2082 | /* Reset Counters */ |
| 2083 | rx_ring->num_ready_recv = 0; |
| 2084 | } |
| 2085 | |
| 2086 | /* et131x_init_recv - Initialize receive data structures */ |
| 2087 | static int et131x_init_recv(struct et131x_adapter *adapter) |
| 2088 | { |
| 2089 | struct rfd *rfd; |
| 2090 | u32 rfdct; |
| 2091 | struct rx_ring *rx_ring = &adapter->rx_ring; |
| 2092 | |
| 2093 | /* Setup each RFD */ |
| 2094 | for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { |
| 2095 | rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA); |
| 2096 | if (!rfd) |
| 2097 | return -ENOMEM; |
| 2098 | |
| 2099 | rfd->skb = NULL; |
| 2100 | |
| 2101 | /* Add this RFD to the recv_list */ |
| 2102 | list_add_tail(&rfd->list_node, &rx_ring->recv_list); |
| 2103 | |
| 2104 | /* Increment the count of available RFDs */
| 2105 | rx_ring->num_ready_recv++; |
| 2106 | } |
| 2107 | |
| 2108 | return 0; |
| 2109 | } |
| 2110 | |
| 2111 | /* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */ |
| 2112 | static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) |
| 2113 | { |
| 2114 | struct phy_device *phydev = adapter->phydev; |
| 2115 | |
| 2116 | /* For version B silicon, we do not use the RxDMA timer for 10 and 100
| 2117 |  * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
| 2118 |  */
| 2119 | if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { |
| 2120 | writel(0, &adapter->regs->rxdma.max_pkt_time); |
| 2121 | writel(1, &adapter->regs->rxdma.num_pkt_done); |
| 2122 | } |
| 2123 | } |
| 2124 | |
| 2125 | /* nic_return_rfd - Recycle a RFD and put it back onto the receive list */ |
| 2126 | static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) |
| 2127 | { |
| 2128 | struct rx_ring *rx_local = &adapter->rx_ring; |
| 2129 | struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; |
| 2130 | u16 buff_index = rfd->bufferindex; |
| 2131 | u8 ring_index = rfd->ringindex; |
| 2132 | unsigned long flags; |
| 2133 | struct fbr_lookup *fbr = rx_local->fbr[ring_index]; |
| 2134 | |
| 2135 | /* We only use the status field of the OOB data; if more of it
| 2136 |  * were used, it would need to be cleaned up here.
| 2137 |  */
| 2138 | if (buff_index < fbr->num_entries) { |
| 2139 | u32 free_buff_ring; |
| 2140 | u32 __iomem *offset; |
| 2141 | struct fbr_desc *next; |
| 2142 | |
| 2143 | if (ring_index == 0) |
| 2144 | offset = &rx_dma->fbr0_full_offset; |
| 2145 | else |
| 2146 | offset = &rx_dma->fbr1_full_offset; |
| 2147 | |
| 2148 | next = (struct fbr_desc *)(fbr->ring_virtaddr) + |
| 2149 | INDEX10(fbr->local_full); |
| 2150 | |
| 2151 | /* Handle the Free Buffer Ring advancement here. Write
| 2152 |  * the PA / Buffer Index for the returned buffer into
| 2153 |  * the oldest (next to be freed) FBR entry.
| 2154 |  */
| 2155 | next->addr_hi = fbr->bus_high[buff_index]; |
| 2156 | next->addr_lo = fbr->bus_low[buff_index]; |
| 2157 | next->word2 = buff_index; |
| 2158 | |
| 2159 | free_buff_ring = bump_free_buff_ring(&fbr->local_full, |
| 2160 | fbr->num_entries - 1); |
| 2161 | writel(free_buff_ring, offset); |
| 2162 | } else { |
| 2163 | dev_err(&adapter->pdev->dev, |
| 2164 | "%s illegal Buffer Index returned\n", __func__); |
| 2165 | } |
| 2166 | |
| 2167 | /* The processing on this RFD is done, so put it back on the tail of |
| 2168 | * our list |
| 2169 | */ |
| 2170 | spin_lock_irqsave(&adapter->rcv_lock, flags); |
| 2171 | list_add_tail(&rfd->list_node, &rx_local->recv_list); |
| 2172 | rx_local->num_ready_recv++; |
| 2173 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 2174 | |
| 2175 | WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); |
| 2176 | } |
| 2177 | |
| 2178 | /* nic_rx_pkts - Checks the hardware for available packets |
| 2179 | * |
| 2180 | * Checks the hardware for available packets, using completion ring |
| 2181 | * If packets are available, it gets an RFD from the recv_list, attaches |
| 2182 | * the packet to it, puts the RFD in the RecvPendList, and also returns |
| 2183 | * the pointer to the RFD. |
| 2184 | */ |
| 2185 | static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) |
| 2186 | { |
| 2187 | struct rx_ring *rx_local = &adapter->rx_ring; |
| 2188 | struct rx_status_block *status; |
| 2189 | struct pkt_stat_desc *psr; |
| 2190 | struct rfd *rfd; |
| 2191 | unsigned long flags; |
| 2192 | struct list_head *element; |
| 2193 | u8 ring_index; |
| 2194 | u16 buff_index; |
| 2195 | u32 len; |
| 2196 | u32 word0; |
| 2197 | u32 word1; |
| 2198 | struct sk_buff *skb; |
| 2199 | struct fbr_lookup *fbr; |
| 2200 | |
| 2201 | /* The RX Status block is written by the DMA engine prior to every
| 2202 |  * interrupt. It contains the next-to-be-used entry index for the
| 2203 |  * Packet Status Ring and for the two Free Buffer rings.
| 2204 |  */
| 2205 | status = rx_local->rx_status_block; |
| 2206 | word1 = status->word1 >> 16; |
| 2207 | |
| 2208 | /* Check the PSR and wrap bits do not match */ |
| 2209 | if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) |
| 2210 | return NULL; /* Looks like this ring is not updated yet */ |
| 2211 | |
| 2212 | /* The packet status ring indicates that data is available. */ |
| 2213 | psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) + |
| 2214 | (rx_local->local_psr_full & 0xFFF); |
| 2215 | |
| 2216 | /* Grab any information that is required once the PSR is advanced, |
| 2217 | * since we can no longer rely on the memory being accurate |
| 2218 | */ |
| 2219 | len = psr->word1 & 0xFFFF; |
| 2220 | ring_index = (psr->word1 >> 26) & 0x03; |
| 2221 | fbr = rx_local->fbr[ring_index]; |
| 2222 | buff_index = (psr->word1 >> 16) & 0x3FF; |
| 2223 | word0 = psr->word0; |
| 2224 | |
| 2225 | /* Indicate that we have used this PSR entry. */ |
| 2226 | /* FIXME wrap 12 */ |
| 2227 | add_12bit(&rx_local->local_psr_full, 1); |
| 2228 | if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) { |
| 2229 | /* Clear psr full and toggle the wrap bit */ |
| 2230 | rx_local->local_psr_full &= ~0xFFF; |
| 2231 | rx_local->local_psr_full ^= 0x1000; |
| 2232 | } |
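/* Worked example of the 12-bit PSR wrap above: with psr_entries = 1024
 * and local_psr_full = 0x3ff (index 1023, wrap 0), add_12bit() yields
 * 0x400; 1024 > 1023, so the index clears and the wrap bit (0x1000)
 * toggles, leaving 0x1000 (index 0, wrap 1).
 */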
| 2233 | |
| 2234 | writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset); |
| 2235 | |
| 2236 | if (ring_index > 1 || buff_index > fbr->num_entries - 1) { |
| 2237 | /* Illegal buffer or ring index cannot be used by S/W */
| 2238 | dev_err(&adapter->pdev->dev, |
| 2239 | "NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n", |
| 2240 | rx_local->local_psr_full & 0xFFF, len, buff_index); |
| 2241 | return NULL; |
| 2242 | } |
| 2243 | |
| 2244 | /* Get and fill the RFD. */ |
| 2245 | spin_lock_irqsave(&adapter->rcv_lock, flags); |
| 2246 | |
| 2247 | element = rx_local->recv_list.next; |
| 2248 | rfd = list_entry(element, struct rfd, list_node); |
| 2249 | |
| 2250 | if (!rfd) { |
| 2251 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 2252 | return NULL; |
| 2253 | } |
| 2254 | |
| 2255 | list_del(&rfd->list_node); |
| 2256 | rx_local->num_ready_recv--; |
| 2257 | |
| 2258 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 2259 | |
| 2260 | rfd->bufferindex = buff_index; |
| 2261 | rfd->ringindex = ring_index; |
| 2262 | |
| 2263 | /* In V1 silicon, there is a bug which screws up filtering of runt |
| 2264 | * packets. Therefore runt packet filtering is disabled in the MAC and |
| 2265 | * the packets are dropped here. They are also counted here. |
| 2266 | */ |
| 2267 | if (len < (NIC_MIN_PACKET_SIZE + 4)) { |
| 2268 | adapter->stats.rx_other_errs++; |
| 2269 | rfd->len = 0; |
| 2270 | goto out; |
| 2271 | } |
| 2272 | |
| 2273 | if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT)) |
| 2274 | adapter->stats.multicast_pkts_rcvd++; |
| 2275 | |
| 2276 | rfd->len = len; |
| 2277 | |
| 2278 | skb = dev_alloc_skb(rfd->len + 2); |
| 2279 | if (!skb) |
| 2280 | return NULL; |
| 2281 | |
| 2282 | adapter->netdev->stats.rx_bytes += rfd->len; |
| 2283 | |
| 2284 | memcpy(skb_put(skb, rfd->len), fbr->virt[buff_index], rfd->len); |
| 2285 | |
| 2286 | skb->protocol = eth_type_trans(skb, adapter->netdev); |
| 2287 | skb->ip_summed = CHECKSUM_NONE; |
| 2288 | netif_receive_skb(skb); |
| 2289 | |
| 2290 | out: |
| 2291 | nic_return_rfd(adapter, rfd); |
| 2292 | return rfd; |
| 2293 | } |
| 2294 | |
| 2295 | static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget) |
| 2296 | { |
| 2297 | struct rfd *rfd = NULL; |
| 2298 | int count = 0; |
| 2299 | int limit = budget; |
| 2300 | bool done = true; |
| 2301 | struct rx_ring *rx_ring = &adapter->rx_ring; |
| 2302 | |
| 2303 | if (budget > MAX_PACKETS_HANDLED) |
| 2304 | limit = MAX_PACKETS_HANDLED; |
| 2305 | |
| 2306 | /* Process packets while RFDs are available, up to the limit */
| 2307 | while (count < limit) { |
| 2308 | if (list_empty(&rx_ring->recv_list)) { |
| 2309 | WARN_ON(rx_ring->num_ready_recv != 0); |
| 2310 | done = false; |
| 2311 | break; |
| 2312 | } |
| 2313 | |
| 2314 | rfd = nic_rx_pkts(adapter); |
| 2315 | |
| 2316 | if (rfd == NULL) |
| 2317 | break; |
| 2318 | |
| 2319 | /* Do not receive any packets until a filter has been set. |
| 2320 | * Do not receive any packets until we have link. |
| 2321 | * If length is zero, return the RFD in order to advance the |
| 2322 | * Free buffer ring. |
| 2323 | */ |
| 2324 | if (!adapter->packet_filter || |
| 2325 | !netif_carrier_ok(adapter->netdev) || |
| 2326 | rfd->len == 0) |
| 2327 | continue; |
| 2328 | |
| 2329 | adapter->netdev->stats.rx_packets++; |
| 2330 | |
| 2331 | if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK) |
| 2332 | dev_warn(&adapter->pdev->dev, "RFD's are running out\n"); |
| 2333 | |
| 2334 | count++; |
| 2335 | } |
| 2336 | |
| 2337 | if (count == limit || !done) { |
| 2338 | rx_ring->unfinished_receives = true; |
| 2339 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, |
| 2340 | &adapter->regs->global.watchdog_timer); |
| 2341 | } else { |
| 2342 | /* Watchdog timer will disable itself if appropriate. */ |
| 2343 | rx_ring->unfinished_receives = false; |
| 2344 | } |
| 2345 | |
| 2346 | return count; |
| 2347 | } |
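/* This routine is shaped for NAPI. A sketch of the poll callback that
 * drives it (the real one lives elsewhere in this file; the napi member
 * and the exact flow here are illustrative assumptions):
 *
 *	static int et131x_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct et131x_adapter *adapter =
 *			container_of(napi, struct et131x_adapter, napi);
 *		int work_done = et131x_handle_recv_pkts(adapter, budget);
 *
 *		et131x_handle_send_pkts(adapter);
 *
 *		if (work_done < budget) {
 *			napi_complete(napi);
 *			et131x_enable_interrupts(adapter);
 *		}
 *		return work_done;
 *	}
 */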
| 2348 | |
| 2349 | /* et131x_tx_dma_memory_alloc |
| 2350 | * |
| 2351 | * Allocates memory that will be visible both to the device and to the CPU. |
| 2352 | * The OS will pass us packets, pointers to which we will insert in the Tx |
| 2353 | * Descriptor queue. The device will read this queue to find the packets in |
| 2354 | * memory. The device will update the "status" in memory each time it xmits a |
| 2355 | * packet. |
| 2356 | */ |
| 2357 | static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) |
| 2358 | { |
| 2359 | int desc_size = 0; |
| 2360 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 2361 | |
| 2362 | /* Allocate memory for the TCB's (Transmit Control Block) */ |
| 2363 | tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), |
| 2364 | GFP_ATOMIC | GFP_DMA); |
| 2365 | if (!tx_ring->tcb_ring) |
| 2366 | return -ENOMEM; |
| 2367 | |
| 2368 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); |
| 2369 | tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev, |
| 2370 | desc_size, |
| 2371 | &tx_ring->tx_desc_ring_pa, |
| 2372 | GFP_KERNEL); |
| 2373 | if (!tx_ring->tx_desc_ring) { |
| 2374 | dev_err(&adapter->pdev->dev, |
| 2375 | "Cannot alloc memory for Tx Ring\n"); |
| 2376 | return -ENOMEM; |
| 2377 | } |
| 2378 | |
| 2379 | tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, |
| 2380 | sizeof(u32), |
| 2381 | &tx_ring->tx_status_pa, |
| 2382 | GFP_KERNEL); |
| 2383 | if (!tx_ring->tx_status) {
| 2384 | dev_err(&adapter->pdev->dev, |
| 2385 | "Cannot alloc memory for Tx status block\n"); |
| 2386 | return -ENOMEM; |
| 2387 | } |
| 2388 | return 0; |
| 2389 | } |
| 2390 | |
| 2391 | static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) |
| 2392 | { |
| 2393 | int desc_size = 0; |
| 2394 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 2395 | |
| 2396 | if (tx_ring->tx_desc_ring) { |
| 2397 | /* Free memory relating to Tx rings here */ |
| 2398 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX); |
| 2399 | dma_free_coherent(&adapter->pdev->dev, |
| 2400 | desc_size, |
| 2401 | tx_ring->tx_desc_ring, |
| 2402 | tx_ring->tx_desc_ring_pa); |
| 2403 | tx_ring->tx_desc_ring = NULL; |
| 2404 | } |
| 2405 | |
| 2406 | /* Free memory for the Tx status block */ |
| 2407 | if (tx_ring->tx_status) { |
| 2408 | dma_free_coherent(&adapter->pdev->dev, |
| 2409 | sizeof(u32), |
| 2410 | tx_ring->tx_status, |
| 2411 | tx_ring->tx_status_pa); |
| 2412 | |
| 2413 | tx_ring->tx_status = NULL; |
| 2414 | } |
| 2415 | /* Free the memory for the tcb structures */ |
| 2416 | kfree(tx_ring->tcb_ring); |
| 2417 | } |
| 2418 | |
| 2419 | /* nic_send_packet - NIC specific send handler for version B silicon. */ |
| 2420 | static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) |
| 2421 | { |
| 2422 | u32 i; |
| 2423 | struct tx_desc desc[24]; |
| 2424 | u32 frag = 0; |
| 2425 | u32 thiscopy, remainder; |
| 2426 | struct sk_buff *skb = tcb->skb; |
| 2427 | u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; |
| 2428 | struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; |
| 2429 | struct phy_device *phydev = adapter->phydev; |
| 2430 | dma_addr_t dma_addr; |
| 2431 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 2432 | |
| 2433 | /* Part of the optimization of this send routine restricts us to
| 2434 |  * sending 24 fragments per pass. In practice we should never see
| 2435 |  * more than 5 fragments.
| 2436 |  */
| 2437 | 
| 2438 | /* The head may use two descriptors, so frags + 2 must fit in desc[24]. */
| 2439 | BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
| 2440 | |
| 2441 | memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); |
| 2442 | |
| 2443 | for (i = 0; i < nr_frags; i++) { |
| 2444 | /* If there is something in this element, let's get a
| 2445 |  * descriptor from the ring and get the necessary data
| 2446 |  */
| 2447 | if (i == 0) { |
| 2448 | /* If the fragments are smaller than a standard MTU, |
| 2449 | * then map them to a single descriptor in the Tx |
| 2450 | * Desc ring. However, if they're larger, as is |
| 2451 | * possible with support for jumbo packets, then |
| 2452 | * split them each across 2 descriptors. |
| 2453 | * |
| 2454 | * This will work until we determine why the hardware |
| 2455 | * doesn't seem to like large fragments. |
| 2456 | */ |
| 2457 | if (skb_headlen(skb) <= 1514) { |
| 2458 | /* Low 16bits are length, high is vlan and |
| 2459 | * unused currently so zero |
| 2460 | */ |
| 2461 | desc[frag].len_vlan = skb_headlen(skb); |
| 2462 | dma_addr = dma_map_single(&adapter->pdev->dev, |
| 2463 | skb->data, |
| 2464 | skb_headlen(skb), |
| 2465 | DMA_TO_DEVICE); |
| 2466 | desc[frag].addr_lo = lower_32_bits(dma_addr); |
| 2467 | desc[frag].addr_hi = upper_32_bits(dma_addr); |
| 2468 | frag++; |
| 2469 | } else { |
| 2470 | desc[frag].len_vlan = skb_headlen(skb) / 2; |
| 2471 | dma_addr = dma_map_single(&adapter->pdev->dev, |
| 2472 | skb->data, |
| 2473 | skb_headlen(skb) / 2, |
| 2474 | DMA_TO_DEVICE); |
| 2475 | desc[frag].addr_lo = lower_32_bits(dma_addr); |
| 2476 | desc[frag].addr_hi = upper_32_bits(dma_addr); |
| 2477 | frag++; |
| 2478 | |
| 2479 | desc[frag].len_vlan = skb_headlen(skb) / 2; |
| 2480 | dma_addr = dma_map_single(&adapter->pdev->dev, |
| 2481 | skb->data + |
| 2482 | skb_headlen(skb) / 2, |
| 2483 | skb_headlen(skb) / 2, |
| 2484 | DMA_TO_DEVICE); |
| 2485 | desc[frag].addr_lo = lower_32_bits(dma_addr); |
| 2486 | desc[frag].addr_hi = upper_32_bits(dma_addr); |
| 2487 | frag++; |
| 2488 | } |
| 2489 | } else { |
| 2490 | desc[frag].len_vlan = frags[i - 1].size; |
| 2491 | dma_addr = skb_frag_dma_map(&adapter->pdev->dev, |
| 2492 | &frags[i - 1], |
| 2493 | 0, |
| 2494 | frags[i - 1].size, |
| 2495 | DMA_TO_DEVICE); |
| 2496 | desc[frag].addr_lo = lower_32_bits(dma_addr); |
| 2497 | desc[frag].addr_hi = upper_32_bits(dma_addr); |
| 2498 | frag++; |
| 2499 | } |
| 2500 | } |
| 2501 | |
| 2502 | if (phydev && phydev->speed == SPEED_1000) { |
| 2503 | if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) { |
| 2504 | /* Last element & Interrupt flag */ |
| 2505 | desc[frag - 1].flags = |
| 2506 | TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; |
| 2507 | tx_ring->since_irq = 0; |
| 2508 | } else { /* Last element */ |
| 2509 | desc[frag - 1].flags = TXDESC_FLAG_LASTPKT; |
| 2510 | } |
| 2511 | } else { |
| 2512 | desc[frag - 1].flags = |
| 2513 | TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT; |
| 2514 | } |
| 2515 | |
| 2516 | desc[0].flags |= TXDESC_FLAG_FIRSTPKT; |
| 2517 | |
| 2518 | tcb->index_start = tx_ring->send_idx; |
| 2519 | tcb->stale = 0; |
| 2520 | |
| 2521 | thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx); |
| 2522 | |
| 2523 | if (thiscopy >= frag) { |
| 2524 | remainder = 0; |
| 2525 | thiscopy = frag; |
| 2526 | } else { |
| 2527 | remainder = frag - thiscopy; |
| 2528 | } |
| 2529 | |
| 2530 | memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx), |
| 2531 | desc, |
| 2532 | sizeof(struct tx_desc) * thiscopy); |
| 2533 | |
| 2534 | add_10bit(&tx_ring->send_idx, thiscopy); |
| 2535 | |
| 2536 | if (INDEX10(tx_ring->send_idx) == 0 || |
| 2537 | INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) { |
| 2538 | tx_ring->send_idx &= ~ET_DMA10_MASK; |
| 2539 | tx_ring->send_idx ^= ET_DMA10_WRAP; |
| 2540 | } |
| 2541 | |
| 2542 | if (remainder) { |
| 2543 | memcpy(tx_ring->tx_desc_ring, |
| 2544 | desc + thiscopy, |
| 2545 | sizeof(struct tx_desc) * remainder); |
| 2546 | |
| 2547 | add_10bit(&tx_ring->send_idx, remainder); |
| 2548 | } |
| 2549 | |
| 2550 | if (INDEX10(tx_ring->send_idx) == 0) { |
| 2551 | if (tx_ring->send_idx) |
| 2552 | tcb->index = NUM_DESC_PER_RING_TX - 1; |
| 2553 | else |
| 2554 | tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); |
| 2555 | } else { |
| 2556 | tcb->index = tx_ring->send_idx - 1; |
| 2557 | } |
| 2558 | |
| 2559 | spin_lock(&adapter->tcb_send_qlock); |
| 2560 | |
| 2561 | if (tx_ring->send_tail) |
| 2562 | tx_ring->send_tail->next = tcb; |
| 2563 | else |
| 2564 | tx_ring->send_head = tcb; |
| 2565 | |
| 2566 | tx_ring->send_tail = tcb; |
| 2567 | |
| 2568 | WARN_ON(tcb->next != NULL); |
| 2569 | |
| 2570 | tx_ring->used++; |
| 2571 | |
| 2572 | spin_unlock(&adapter->tcb_send_qlock); |
| 2573 | |
| 2574 | /* Write the new write pointer back to the device. */ |
| 2575 | writel(tx_ring->send_idx, &adapter->regs->txdma.service_request); |
| 2576 | |
| 2577 | /* For Gig only, we use Tx Interrupt coalescing. Enable the software |
| 2578 | * timer to wake us up if this packet isn't followed by N more. |
| 2579 | */ |
| 2580 | if (phydev && phydev->speed == SPEED_1000) { |
| 2581 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, |
| 2582 | &adapter->regs->global.watchdog_timer); |
| 2583 | } |
| 2584 | return 0; |
| 2585 | } |
| 2586 | |
| 2587 | static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) |
| 2588 | { |
| 2589 | int status; |
| 2590 | struct tcb *tcb; |
| 2591 | unsigned long flags; |
| 2592 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 2593 | |
| 2594 | /* All packets must have at least a MAC address and a protocol type */ |
| 2595 | if (skb->len < ETH_HLEN) |
| 2596 | return -EIO; |
| 2597 | |
| 2598 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
| 2599 | |
| 2600 | tcb = tx_ring->tcb_qhead; |
| 2601 | |
| 2602 | if (tcb == NULL) { |
| 2603 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 2604 | return -ENOMEM; |
| 2605 | } |
| 2606 | |
| 2607 | tx_ring->tcb_qhead = tcb->next; |
| 2608 | |
| 2609 | if (tx_ring->tcb_qhead == NULL) |
| 2610 | tx_ring->tcb_qtail = NULL; |
| 2611 | |
| 2612 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 2613 | |
| 2614 | tcb->skb = skb; |
| 2615 | tcb->next = NULL; |
| 2616 | |
| 2617 | status = nic_send_packet(adapter, tcb); |
| 2618 | |
| 2619 | if (status != 0) { |
| 2620 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
| 2621 | |
| 2622 | if (tx_ring->tcb_qtail) |
| 2623 | tx_ring->tcb_qtail->next = tcb; |
| 2624 | else |
| 2625 | /* Apparently ready Q is empty. */ |
| 2626 | tx_ring->tcb_qhead = tcb; |
| 2627 | |
| 2628 | tx_ring->tcb_qtail = tcb; |
| 2629 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 2630 | return status; |
| 2631 | } |
| 2632 | WARN_ON(tx_ring->used > NUM_TCB); |
| 2633 | return 0; |
| 2634 | } |
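/* A sketch of how send_packet() is driven from the netdev transmit hook
 * (the real ndo_start_xmit lives elsewhere in this file; details here
 * are illustrative):
 *
 *	static netdev_tx_t et131x_tx(struct sk_buff *skb,
 *				     struct net_device *netdev)
 *	{
 *		struct et131x_adapter *adapter = netdev_priv(netdev);
 *		struct tx_ring *tx_ring = &adapter->tx_ring;
 *
 *		if (tx_ring->used >= NUM_TCB - 1)
 *			netif_stop_queue(netdev);
 *
 *		if (send_packet(skb, adapter)) {
 *			dev_kfree_skb_any(skb);
 *			netdev->stats.tx_dropped++;
 *		}
 *		return NETDEV_TX_OK;
 *	}
 */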
| 2635 | |
| 2636 | /* free_send_packet - Recycle a struct tcb */ |
| 2637 | static inline void free_send_packet(struct et131x_adapter *adapter, |
| 2638 | struct tcb *tcb) |
| 2639 | { |
| 2640 | unsigned long flags; |
| 2641 | struct tx_desc *desc = NULL; |
| 2642 | struct net_device_stats *stats = &adapter->netdev->stats; |
| 2643 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 2644 | u64 dma_addr; |
| 2645 | |
| 2646 | if (tcb->skb) { |
| 2647 | stats->tx_bytes += tcb->skb->len; |
| 2648 | |
| 2649 | /* Iterate through the TX descriptors on the ring
| 2650 |  * corresponding to this packet and unmap the fragments
| 2651 |  * they point to
| 2652 |  */
| 2653 | do { |
| 2654 | desc = tx_ring->tx_desc_ring + |
| 2655 | INDEX10(tcb->index_start); |
| 2656 | |
| 2657 | dma_addr = desc->addr_lo; |
| 2658 | dma_addr |= (u64)desc->addr_hi << 32; |
| 2659 | |
| 2660 | dma_unmap_single(&adapter->pdev->dev, |
| 2661 | dma_addr, |
| 2662 | desc->len_vlan, DMA_TO_DEVICE); |
| 2663 | |
| 2664 | add_10bit(&tcb->index_start, 1); |
| 2665 | if (INDEX10(tcb->index_start) >= |
| 2666 | NUM_DESC_PER_RING_TX) { |
| 2667 | tcb->index_start &= ~ET_DMA10_MASK; |
| 2668 | tcb->index_start ^= ET_DMA10_WRAP; |
| 2669 | } |
| 2670 | } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index)); |
| 2671 | |
| 2672 | dev_kfree_skb_any(tcb->skb); |
| 2673 | } |
| 2674 | |
| 2675 | memset(tcb, 0, sizeof(struct tcb)); |
| 2676 | |
| 2677 | /* Add the TCB to the Ready Q */ |
| 2678 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
| 2679 | |
| 2680 | stats->tx_packets++; |
| 2681 | |
| 2682 | if (tx_ring->tcb_qtail) |
| 2683 | tx_ring->tcb_qtail->next = tcb; |
| 2684 | else /* Apparently ready Q is empty. */ |
| 2685 | tx_ring->tcb_qhead = tcb; |
| 2686 | |
| 2687 | tx_ring->tcb_qtail = tcb; |
| 2688 | |
| 2689 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 2690 | WARN_ON(tx_ring->used < 0); |
| 2691 | } |
| 2692 | |
| 2693 | /* et131x_free_busy_send_packets - Free and complete the stopped active sends */ |
| 2694 | static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) |
| 2695 | { |
| 2696 | struct tcb *tcb; |
| 2697 | unsigned long flags; |
| 2698 | u32 freed = 0; |
| 2699 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 2700 | |
| 2701 | /* Any packets being sent? Check the first TCB on the send list */ |
| 2702 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 2703 | |
| 2704 | tcb = tx_ring->send_head; |
| 2705 | |
| 2706 | while (tcb != NULL && freed < NUM_TCB) { |
| 2707 | struct tcb *next = tcb->next; |
| 2708 | |
| 2709 | tx_ring->send_head = next; |
| 2710 | |
| 2711 | if (next == NULL) |
| 2712 | tx_ring->send_tail = NULL; |
| 2713 | |
| 2714 | tx_ring->used--; |
| 2715 | |
| 2716 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 2717 | |
| 2718 | freed++; |
| 2719 | free_send_packet(adapter, tcb); |
| 2720 | |
| 2721 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 2722 | |
| 2723 | tcb = tx_ring->send_head; |
| 2724 | } |
| 2725 | |
| 2726 | WARN_ON(freed == NUM_TCB); |
| 2727 | |
| 2728 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 2729 | |
| 2730 | tx_ring->used = 0; |
| 2731 | } |
| 2732 | |
| 2733 | /* et131x_handle_send_pkts |
| 2734 | * |
| 2735 | * Re-claim the send resources, complete sends and get more to send from |
| 2736 | * the send wait queue. |
| 2737 | */ |
| 2738 | static void et131x_handle_send_pkts(struct et131x_adapter *adapter) |
| 2739 | { |
| 2740 | unsigned long flags; |
| 2741 | u32 serviced; |
| 2742 | struct tcb *tcb; |
| 2743 | u32 index; |
| 2744 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 2745 | |
| 2746 | serviced = readl(&adapter->regs->txdma.new_service_complete); |
| 2747 | index = INDEX10(serviced); |
| 2748 | |
| 2749 | /* Has the ring wrapped? Process any descriptors that do not have |
| 2750 | * the same "wrap" indicator as the current completion indicator |
| 2751 | */ |
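/* The two loops below free TCBs whose descriptors the hardware has
 * completed. Written as a single predicate (illustrative only):
 *
 *	complete = ((serviced ^ tcb->index) & ET_DMA10_WRAP) ?
 *		   (index < INDEX10(tcb->index)) :
 *		   (index > (tcb->index & ET_DMA10_MASK));
 */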
| 2752 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 2753 | |
| 2754 | tcb = tx_ring->send_head; |
| 2755 | |
| 2756 | while (tcb && |
| 2757 | ((serviced ^ tcb->index) & ET_DMA10_WRAP) && |
| 2758 | index < INDEX10(tcb->index)) { |
| 2759 | tx_ring->used--; |
| 2760 | tx_ring->send_head = tcb->next; |
| 2761 | if (tcb->next == NULL) |
| 2762 | tx_ring->send_tail = NULL; |
| 2763 | |
| 2764 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 2765 | free_send_packet(adapter, tcb); |
| 2766 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 2767 | |
| 2768 | /* Go to the next packet */
| 2769 | tcb = tx_ring->send_head; |
| 2770 | } |
| 2771 | while (tcb && |
| 2772 | !((serviced ^ tcb->index) & ET_DMA10_WRAP) && |
| 2773 | index > (tcb->index & ET_DMA10_MASK)) { |
| 2774 | tx_ring->used--; |
| 2775 | tx_ring->send_head = tcb->next; |
| 2776 | if (tcb->next == NULL) |
| 2777 | tx_ring->send_tail = NULL; |
| 2778 | |
| 2779 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 2780 | free_send_packet(adapter, tcb); |
| 2781 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 2782 | |
| 2783 | /* Go to the next packet */
| 2784 | tcb = tx_ring->send_head; |
| 2785 | } |
| 2786 | |
| 2787 | /* Wake up the queue when we hit a low-water mark */ |
| 2788 | if (tx_ring->used <= NUM_TCB / 3) |
| 2789 | netif_wake_queue(adapter->netdev); |
| 2790 | |
| 2791 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 2792 | } |
| 2793 | |
| 2794 | static int et131x_get_settings(struct net_device *netdev, |
| 2795 | struct ethtool_cmd *cmd) |
| 2796 | { |
| 2797 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 2798 | |
| 2799 | return phy_ethtool_gset(adapter->phydev, cmd); |
| 2800 | } |
| 2801 | |
| 2802 | static int et131x_set_settings(struct net_device *netdev, |
| 2803 | struct ethtool_cmd *cmd) |
| 2804 | { |
| 2805 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 2806 | |
| 2807 | return phy_ethtool_sset(adapter->phydev, cmd); |
| 2808 | } |
| 2809 | |
| 2810 | static int et131x_get_regs_len(struct net_device *netdev) |
| 2811 | { |
| 2812 | #define ET131X_REGS_LEN 256 |
| 2813 | return ET131X_REGS_LEN * sizeof(u32); |
| 2814 | } |
| 2815 | |
| 2816 | static void et131x_get_regs(struct net_device *netdev, |
| 2817 | struct ethtool_regs *regs, void *regs_data) |
| 2818 | { |
| 2819 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 2820 | struct address_map __iomem *aregs = adapter->regs; |
| 2821 | u32 *regs_buff = regs_data; |
| 2822 | u32 num = 0; |
| 2823 | u16 tmp; |
| 2824 | |
| 2825 | memset(regs_data, 0, et131x_get_regs_len(netdev)); |
| 2826 | |
| 2827 | regs->version = (1 << 24) | (adapter->pdev->revision << 16) | |
| 2828 | adapter->pdev->device; |
| 2829 | |
| 2830 | /* PHY regs */ |
| 2831 | et131x_mii_read(adapter, MII_BMCR, &tmp); |
| 2832 | regs_buff[num++] = tmp; |
| 2833 | et131x_mii_read(adapter, MII_BMSR, &tmp); |
| 2834 | regs_buff[num++] = tmp; |
| 2835 | et131x_mii_read(adapter, MII_PHYSID1, &tmp); |
| 2836 | regs_buff[num++] = tmp; |
| 2837 | et131x_mii_read(adapter, MII_PHYSID2, &tmp); |
| 2838 | regs_buff[num++] = tmp; |
| 2839 | et131x_mii_read(adapter, MII_ADVERTISE, &tmp); |
| 2840 | regs_buff[num++] = tmp; |
| 2841 | et131x_mii_read(adapter, MII_LPA, &tmp); |
| 2842 | regs_buff[num++] = tmp; |
| 2843 | et131x_mii_read(adapter, MII_EXPANSION, &tmp); |
| 2844 | regs_buff[num++] = tmp; |
| 2845 | /* Autoneg next page transmit reg */ |
| 2846 | et131x_mii_read(adapter, 0x07, &tmp); |
| 2847 | regs_buff[num++] = tmp; |
| 2848 | /* Link partner next page reg */ |
| 2849 | et131x_mii_read(adapter, 0x08, &tmp); |
| 2850 | regs_buff[num++] = tmp; |
| 2851 | et131x_mii_read(adapter, MII_CTRL1000, &tmp); |
| 2852 | regs_buff[num++] = tmp; |
| 2853 | et131x_mii_read(adapter, MII_STAT1000, &tmp); |
| 2854 | regs_buff[num++] = tmp; |
| 2855 | et131x_mii_read(adapter, 0x0b, &tmp); |
| 2856 | regs_buff[num++] = tmp; |
| 2857 | et131x_mii_read(adapter, 0x0c, &tmp); |
| 2858 | regs_buff[num++] = tmp; |
| 2859 | et131x_mii_read(adapter, MII_MMD_CTRL, &tmp); |
| 2860 | regs_buff[num++] = tmp; |
| 2861 | et131x_mii_read(adapter, MII_MMD_DATA, &tmp); |
| 2862 | regs_buff[num++] = tmp; |
| 2863 | et131x_mii_read(adapter, MII_ESTATUS, &tmp); |
| 2864 | regs_buff[num++] = tmp; |
| 2865 | |
| 2866 | et131x_mii_read(adapter, PHY_INDEX_REG, &tmp); |
| 2867 | regs_buff[num++] = tmp; |
| 2868 | et131x_mii_read(adapter, PHY_DATA_REG, &tmp); |
| 2869 | regs_buff[num++] = tmp; |
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
	regs_buff[num++] = tmp;

	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_CONFIG, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_1, &tmp);
	regs_buff[num++] = tmp;
	et131x_mii_read(adapter, PHY_LED_2, &tmp);
	regs_buff[num++] = tmp;

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
	regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
	regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
}

static void et131x_get_drvinfo(struct net_device *netdev,
			       struct ethtool_drvinfo *info)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops et131x_ethtool_ops = {
	.get_settings = et131x_get_settings,
	.set_settings = et131x_set_settings,
	.get_drvinfo = et131x_get_drvinfo,
	.get_regs_len = et131x_get_regs_len,
	.get_regs = et131x_get_regs,
	.get_link = ethtool_op_get_link,
};
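
/* Usage sketch (not part of the driver): the dump assembled by
 * et131x_get_regs() above can be pulled from user space with the
 * ethtool utility, e.g.:
 *
 *	ethtool -d eth0 raw on > et131x_regs.bin
 *
 * The layout follows the read order above: PHY (MII) registers first,
 * then the global, TXDMA and RXDMA register blocks.
 */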

/* et131x_hwaddr_init - set up the MAC Address */
static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
	/* If we still have the default MAC from init and no MAC address was
	 * read from the EEPROM, generate the last octet and set it on the
	 * device.
	 */
	if (is_zero_ether_addr(adapter->rom_addr)) {
		/* Generate the last octet randomly, to reduce the chance of
		 * setting the same MAC address as another one of our cards
		 * in the system.
		 */
		get_random_bytes(&adapter->addr[5], 1);
		/* The register we are working with holds the default value,
		 * so copy the current address into the permanent address.
		 */
		ether_addr_copy(adapter->rom_addr, adapter->addr);
	} else {
		/* We do not have an override address, so set the current
		 * address to the permanent address and add it to the device.
		 */
		ether_addr_copy(adapter->addr, adapter->rom_addr);
	}
}
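
/* Worked example (a sketch inferred from this file, not a datasheet):
 * with the default MAC 00:05:3d:00:02:00 set in et131x_adapter_init()
 * and no EEPROM address, the function above yields 00:05:3d:00:02:XX,
 * where XX is the randomly generated last octet.
 */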

static int et131x_pci_init(struct et131x_adapter *adapter,
			   struct pci_dev *pdev)
{
	u16 max_payload;
	int i, rc;

	rc = et131x_init_eeprom(adapter);
	if (rc < 0)
		goto out;

	if (!pci_is_pcie(pdev)) {
		dev_err(&pdev->dev, "Missing PCIe capabilities\n");
		goto err_out;
	}

	/* Program the Ack/Nak latency and replay timers. pcie_mpss holds
	 * the Max Payload Size Supported field, encoded so that the size
	 * in bytes is 128 << pcie_mpss; only the 128- and 256-byte
	 * settings (0 and 1) need the adjusted values below.
	 */
	max_payload = pdev->pcie_mpss;

	if (max_payload < 2) {
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					  acknak[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for ACK/NAK\n");
			goto err_out;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					  replay[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for Replay Timer\n");
			goto err_out;
		}
	}

	/* L0s and L1 latency timers. We are using the default values:
	 * 001 for L0s and 010 for L1.
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
			"Could not write PCI config space for Latency Timers\n");
		goto err_out;
	}

	/* Change the max read size to 2k */
	if (pcie_set_readrq(pdev, 2048)) {
		dev_err(&pdev->dev,
			"Couldn't change PCI config space for Max read size\n");
		goto err_out;
	}

	/* Get the MAC address from config space only if an EEPROM exists;
	 * otherwise the address stored there is not valid.
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					 adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			goto err_out;
		}
	}
	ether_addr_copy(adapter->addr, adapter->rom_addr);
out:
	return rc;
err_out:
	rc = -EIO;
	goto out;
}

/* et131x_error_timer_handler
 * @data: timer-specific variable; here a pointer to our adapter structure
 *
 * The routine called when the error timer expires, to track the number of
 * recurring errors.
 */
static void et131x_error_timer_handler(unsigned long data)
{
	struct et131x_adapter *adapter = (struct et131x_adapter *)data;
	struct phy_device *phydev = adapter->phydev;

	if (et1310_in_phy_coma(adapter)) {
		/* Bring the device immediately out of coma, to prevent it
		 * from sleeping indefinitely; this mechanism could be
		 * improved!
		 */
		et1310_disable_phy_coma(adapter);
		adapter->boot_coma = 20;
	} else {
		et1310_update_macstat_host_counters(adapter);
	}

	if (!phydev->link && adapter->boot_coma < 11)
		adapter->boot_coma++;

	if (adapter->boot_coma == 10) {
		if (!phydev->link) {
			if (!et1310_in_phy_coma(adapter)) {
				/* NOTE - This was originally a 'sync with
				 * interrupt'. How to do that under Linux?
				 */
				et131x_enable_interrupts(adapter);
				et1310_enable_phy_coma(adapter);
			}
		}
	}

	/* This is a periodic timer, so reschedule */
	mod_timer(&adapter->error_timer, jiffies +
		  msecs_to_jiffies(TX_ERROR_PERIOD));
}
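
/* Timing note (inferred from the code above, not from documentation):
 * the timer fires every TX_ERROR_PERIOD ms and boot_coma counts the
 * consecutive expirations without link. After ten of them the PHY is
 * put into coma (power-down) mode; boot_coma = 20 effectively blocks
 * re-entering coma until the link state changes and resets the count.
 */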

static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}

static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
	int status;

	status = et131x_tx_dma_memory_alloc(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"et131x_tx_dma_memory_alloc FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		return status;
	}

	status = et131x_rx_dma_memory_alloc(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"et131x_rx_dma_memory_alloc FAILED\n");
		et131x_adapter_memory_free(adapter);
		return status;
	}

	status = et131x_init_recv(adapter);
	if (status) {
		dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
		et131x_adapter_memory_free(adapter);
	}
	return status;
}

static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return;
	if (phydev->link == adapter->link)
		return;

	/* Check to see if we are in coma mode and if so, disable it because
	 * we will not be able to read PHY values until we are out.
	 */
	if (et1310_in_phy_coma(adapter))
		et1310_disable_phy_coma(adapter);

	adapter->link = phydev->link;
	phy_print_status(phydev);

	if (phydev->link) {
		adapter->boot_coma = 20;
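
		/* The MII writes below (here and in the link-down path
		 * further down) poke the MPhy control/index/data registers
		 * for 10 Mbit links; the magic values 0x4, 0x8402 and 511
		 * are presumably inherited from the vendor driver, as no
		 * public ET1310 PHY documentation confirms their meaning.
		 */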
		if (phydev->speed == SPEED_10) {
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et1310_config_flow_control(adapter);

		if (phydev->speed == SPEED_1000 &&
		    adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_CONFIG, reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	} else {
		adapter->boot_coma = 0;

		if (phydev->speed == SPEED_10) {
			u16 register18;

			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					&register18);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_INDEX_REG, register18 | 0x8402);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_DATA_REG, register18 | 511);
			et131x_mii_write(adapter, phydev->mdio.addr,
					 PHY_MPHY_CONTROL_REG, register18);
		}

		et131x_free_busy_send_packets(adapter);
		et131x_init_send(adapter);

		/* Bring the device back to the state it was during init prior
		 * to autonegotiation being complete. This way, when we get
		 * the auto-neg complete interrupt, we can complete init by
		 * calling config_mac_regs2.
		 */
		et131x_soft_reset(adapter);

		et131x_adapter_setup(adapter);

		et131x_disable_txrx(netdev);
		et131x_enable_txrx(netdev);
	}
}

static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, phydev_name(phydev),
			     &et131x_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_MII |
			      SUPPORTED_TP);

	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Half |
				     SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	phydev->autoneg = AUTONEG_ENABLE;
	adapter->phydev = phydev;

	phy_attached_info(phydev);

	return 0;
}

static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
						  struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->rcv_lock);

	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	ether_addr_copy(adapter->addr, default_mac);

	return adapter;
}

static void et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);
	netif_napi_del(&adapter->napi);
	phy_disconnect(adapter->phydev);
	mdiobus_unregister(adapter->mii_bus);
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);
	iounmap(adapter->regs);
	pci_dev_put(pdev);

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void et131x_up(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_enable_txrx(netdev);
	phy_start(adapter->phydev);
}

static void et131x_down(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Save the timestamp for the TX watchdog, prevent a timeout */
	netdev->trans_start = jiffies;

	phy_stop(adapter->phydev);
	et131x_disable_txrx(netdev);
}

#ifdef CONFIG_PM_SLEEP
static int et131x_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		et131x_down(netdev);
		pci_save_state(pdev);
	}

	return 0;
}

static int et131x_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		pci_restore_state(pdev);
		et131x_up(netdev);
		netif_device_attach(netdev);
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
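
/* PM note (a sketch of the flow, inferred from the code): on suspend the
 * interface is detached and the PCI state saved only if it was running;
 * resume mirrors that. SIMPLE_DEV_PM_OPS wraps both callbacks into the
 * dev_pm_ops referenced by et131x_driver.driver.pm below, and they are
 * only compiled in when CONFIG_PM_SLEEP is set, hence the #ifdef.
 */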

static irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	bool enable_interrupts = true;
	struct net_device *netdev = dev_id;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *iomem = adapter->regs;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	u32 status;

	if (!netif_device_present(netdev)) {
		handled = false;
		enable_interrupts = false;
		goto out;
	}

	et131x_disable_interrupts(adapter);

	status = readl(&adapter->regs->global.int_status);

	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		status &= ~INT_MASK_ENABLE;
	else
		status &= ~INT_MASK_ENABLE_NO_FLOW;

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */
	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = tx_ring->send_head;

		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (rx_ring->unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
		enable_interrupts = false;
		napi_schedule(&adapter->napi);
	}

	status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);

	if (!status)
		goto out;

	if (status & ET_INTR_TXDMA_ERR) {
		/* Following read also clears the register (COR) */
		u32 txdma_err = readl(&iomem->txdma.tx_dma_error);

		dev_warn(&adapter->pdev->dev,
			 "TXDMA_ERR interrupt, error = %d\n",
			 txdma_err);
	}

	if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
		/* This indicates the number of unused buffers in RXDMA free
		 * buffer ring 0 is <= the limit you programmed. Free buffer
		 * resources need to be returned. Free buffers are consumed as
		 * packets are passed from the network to the host. The host
		 * becomes aware of the packets from the contents of the packet
		 * status ring. This ring is queried when the packet done
		 * interrupt occurs. Packets are then passed to the OS. When
		 * the OS is done with the packets the resources can be
		 * returned to the ET1310 for re-use. This interrupt is one
		 * method of returning resources.
		 */

		/* If the user has flow control on, then we will
		 * send a pause packet, otherwise just exit
		 */
		if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
			u32 pm_csr;

			/* Tell the device to send a pause packet via the back
			 * pressure register (bp req and bp xon/xoff)
			 */
			pm_csr = readl(&iomem->global.pm_csr);
			if (!et1310_in_phy_coma(adapter))
				writel(3, &iomem->txmac.bp_ctrl);
		}
	}

	/* Handle Packet Status Ring Low Interrupt */
	if (status & ET_INTR_RXDMA_STAT_LOW) {
		/* Same idea as with the two Free Buffer Rings. Packets going
		 * from the network to the host each consume a free buffer
		 * resource and a packet status resource. These resources are
		 * passed to the OS. When the OS is done with the resources,
		 * they need to be returned to the ET1310. This is one method
		 * of returning the resources.
		 */
	}

	if (status & ET_INTR_RXDMA_ERR) {
		/* The rxdma_error interrupt is sent when a time-out on a
		 * request issued by the JAGCore has occurred or a completion
		 * is returned with an unsuccessful status. In both cases the
		 * request is considered complete. The JAGCore will
		 * automatically re-try the request in question. Normally
		 * information on events like these is sent to the host using
		 * the "Advanced Error Reporting" capability. This interrupt
		 * is another way of getting similar information. The only
		 * thing required is to clear the interrupt by reading the ISR
		 * in the global resources. The JAGCore will do a re-try on
		 * the request. Normally you should never see this interrupt.
		 * If you start to see this interrupt occurring frequently
		 * then something bad has occurred. A reset might be the thing
		 * to do.
		 */
		/* TRAP();*/

		dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
			 readl(&iomem->txmac.tx_test));
	}

	/* Handle the Wake on LAN Event */
	if (status & ET_INTR_WOL) {
		/* This is a secondary interrupt for wake on LAN. The driver
		 * should never see this, if it does, something serious is
		 * wrong.
		 */
		dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
	}

	if (status & ET_INTR_TXMAC) {
		u32 err = readl(&iomem->txmac.err);

		/* When any of the errors occur and TXMAC generates an
		 * interrupt to report these errors, it usually means that
		 * TXMAC has detected an error in the data stream retrieved
		 * from the on-chip Tx Q. All of these errors are catastrophic
		 * and TXMAC won't be able to recover data when these errors
		 * occur. In a nutshell, the whole Tx path will have to be
		 * reset and re-configured afterwards.
		 */
		dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
			 err);

		/* If we are debugging, we want to see this error, otherwise
		 * we just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_RXMAC) {
		/* These interrupts are catastrophic to the device, what we
		 * need to do is disable the interrupts and set the flag to
		 * cause us to reset so we can solve this issue.
		 */
		dev_warn(&adapter->pdev->dev,
			 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
			 readl(&iomem->rxmac.err_reg));

		dev_warn(&adapter->pdev->dev,
			 "Enable 0x%08x, Diag 0x%08x\n",
			 readl(&iomem->rxmac.ctrl),
			 readl(&iomem->rxmac.rxq_diag));

		/* If we are debugging, we want to see this error, otherwise
		 * we just want the device to be reset and continue
		 */
	}

	if (status & ET_INTR_MAC_STAT) {
		/* This means at least one of the un-masked counters in the
		 * MAC_STAT block has rolled over. Use this to maintain the
		 * top, software managed bits of the counter(s).
		 */
		et1310_handle_macstat_interrupt(adapter);
	}

	if (status & ET_INTR_SLV_TIMEOUT) {
		/* This means a timeout has occurred on a read or write
		 * request to one of the JAGCore registers. The Global
		 * Resources block has terminated the request and on a read
		 * request, returned a "fake" value. The most likely reasons
		 * are: Bad Address or the addressed module is in a power-down
		 * state and can't respond.
		 */
	}

out:
	if (enable_interrupts)
		et131x_enable_interrupts(adapter);

	return IRQ_RETVAL(handled);
}

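/* Interrupt flow (a sketch inferred from the code, not from the
 * datasheet): the ISR masks the chip's interrupts and reads the status
 * word; TX-complete and RX-done work is deferred to NAPI via
 * napi_schedule(), in which case interrupts stay masked until
 * et131x_poll() below finishes under budget and re-enables them.
 */
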
static int et131x_poll(struct napi_struct *napi, int budget)
{
	struct et131x_adapter *adapter =
		container_of(napi, struct et131x_adapter, napi);
	int work_done = et131x_handle_recv_pkts(adapter, budget);

	et131x_handle_send_pkts(adapter);

	if (work_done < budget) {
		napi_complete(&adapter->napi);
		et131x_enable_interrupts(adapter);
	}

	return work_done;
}

/* et131x_stats - Return the current device statistics */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct net_device_stats *stats = &adapter->netdev->stats;
	struct ce_stats *devstat = &adapter->stats;

	stats->rx_errors = devstat->rx_length_errs +
			   devstat->rx_align_errs +
			   devstat->rx_crc_errs +
			   devstat->rx_code_violations +
			   devstat->rx_other_errs;
	stats->tx_errors = devstat->tx_max_pkt_errs;
	stats->multicast = devstat->multicast_pkts_rcvd;
	stats->collisions = devstat->tx_collisions;

	stats->rx_length_errors = devstat->rx_length_errs;
	stats->rx_over_errors = devstat->rx_overflows;
	stats->rx_crc_errors = devstat->rx_crc_errs;
	stats->rx_dropped = devstat->rcvd_pkts_dropped;

	/* NOTE: Not used, can't find analogous statistics */
	/* stats->rx_frame_errors = devstat->; */
	/* stats->rx_fifo_errors = devstat->; */
	/* stats->rx_missed_errors = devstat->; */

	/* stats->tx_aborted_errors = devstat->; */
	/* stats->tx_carrier_errors = devstat->; */
	/* stats->tx_fifo_errors = devstat->; */
	/* stats->tx_heartbeat_errors = devstat->; */
	/* stats->tx_window_errors = devstat->; */
	return stats;
}

static int et131x_open(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	unsigned int irq = pdev->irq;
	int result;

	/* Start the timer to track NIC errors */
	init_timer(&adapter->error_timer);
	adapter->error_timer.expires = jiffies +
		msecs_to_jiffies(TX_ERROR_PERIOD);
	adapter->error_timer.function = et131x_error_timer_handler;
	adapter->error_timer.data = (unsigned long)adapter;
	add_timer(&adapter->error_timer);

	result = request_irq(irq, et131x_isr,
			     IRQF_SHARED, netdev->name, netdev);
	if (result) {
		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
		return result;
	}

	adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;

	napi_enable(&adapter->napi);

	et131x_up(netdev);

	return result;
}

static int et131x_close(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_down(netdev);
	napi_disable(&adapter->napi);

	adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
	free_irq(adapter->pdev->irq, netdev);

	/* Stop the error timer */
	return del_timer_sync(&adapter->error_timer);
}

static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
			int cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (!adapter->phydev)
		return -EINVAL;

	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
}

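/* Bit usage in the function below, as inferred from the code rather
 * than a datasheet: rxmac.pf_ctrl bit 0 enables broadcast filtering,
 * bit 1 multicast and bit 2 unicast/directed, while rxmac.ctrl bit 2
 * (0x04) bypasses packet filtering entirely, which is also the
 * promiscuous-mode setting.
 */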
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
	int filter = adapter->packet_filter;
	u32 ctrl;
	u32 pf_ctrl;

	ctrl = readl(&adapter->regs->rxmac.ctrl);
	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);

	/* Default to disabled packet filtering */
	ctrl |= 0x04;

	/* Set us to be in promiscuous mode so we receive everything, this
	 * is also true when we get a packet filter of 0
	 */
	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) {
		pf_ctrl &= ~7;	/* Clear filter bits */
	} else {
		/* Set us up with Multicast packet filtering. Three cases are
		 * possible - (1) we have a multi-cast list, (2) we receive
		 * ALL multicast entries or (3) we receive none.
		 */
		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) {
			pf_ctrl &= ~2;	/* Multicast filter bit */
		} else {
			et1310_setup_device_for_multicast(adapter);
			pf_ctrl |= 2;
			ctrl &= ~0x04;
		}

		/* Set us up with Unicast packet filtering */
		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
			et1310_setup_device_for_unicast(adapter);
			pf_ctrl |= 4;
			ctrl &= ~0x04;
		}

		/* Set us up with Broadcast packet filtering */
		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
			pf_ctrl |= 1;	/* Broadcast filter bit */
			ctrl &= ~0x04;
		} else {
			pf_ctrl &= ~1;
		}

		/* Setup the receive mac configuration registers - Packet
		 * Filter control + the enable / disable for packet filter
		 * in the control reg.
		 */
		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
		writel(ctrl, &adapter->regs->rxmac.ctrl);
	}
	return 0;
}

static void et131x_multicast(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	int packet_filter;
	struct netdev_hw_addr *ha;
	int i;

	/* Before we modify the platform-independent filter flags, store them
	 * locally. This allows us to determine if anything's changed and if
	 * we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a single
	 * flag to check multicast, and multiple multicast addresses can be
	 * set, this is the easiest way to determine if more than one
	 * multicast address is being set.
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;

	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */
	if (netdev->flags & IFF_PROMISC)
		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
	else
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) < 1) {
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
	} else {
		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
	}

	/* Set values in the private adapter struct */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == NIC_MAX_MCAST_LIST)
			break;
		ether_addr_copy(adapter->multicast_list[i++], ha->addr);
	}
	adapter->multicast_addr_count = i;

	/* Are the new flags different from the previous ones? If not, then no
	 * action is required
	 *
	 * NOTE - This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
	if (packet_filter != adapter->packet_filter)
		et131x_set_packet_filter(adapter);
}

static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;

	/* stop the queue if it's getting full */
	if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netdev->trans_start = jiffies;

	/* TCB is not available */
	if (tx_ring->used >= NUM_TCB)
		goto drop_err;

	if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
	    !netif_carrier_ok(netdev))
		goto drop_err;

	if (send_packet(skb, adapter))
		goto drop_err;

	return NETDEV_TX_OK;

drop_err:
	dev_kfree_skb_any(skb);
	adapter->netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* et131x_tx_timeout - Timeout handler
 *
 * The handler called when a Tx request times out. The timeout period is
 * specified by the 'watchdog_timeo' field in the net_device structure (see
 * et131x_pci_setup() to see how this value is set).
 */
static void et131x_tx_timeout(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb;
	unsigned long flags;

	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
		return;

	/* Any nonrecoverable hardware error?
	 * Checks adapter->flags for any failure in phy reading
	 */
	if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
		return;

	/* Hardware failure? */
	if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
		return;
	}

	/* Is send stuck? */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
	tcb = tx_ring->send_head;
	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	if (tcb) {
		tcb->count++;

		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
			dev_warn(&adapter->pdev->dev,
				 "Send stuck - reset. tcb->WrIndex %x\n",
				 tcb->index);

			adapter->netdev->stats.tx_errors++;

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}
	}
}

static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < 64 || new_mtu > 9216)
		return -EINVAL;

	et131x_disable_txrx(netdev);

	netdev->mtu = new_mtu;

	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support: the MTU plus
	 * the 14-byte Ethernet header (ETH_HLEN)
	 */
	adapter->registry_jumbo_packet = new_mtu + 14;
	et131x_soft_reset(adapter);

	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_warn(&adapter->pdev->dev,
			 "Change MTU failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);
	et131x_hwaddr_init(adapter);
	ether_addr_copy(netdev->dev_addr, adapter->addr);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);
	et131x_enable_txrx(netdev);

	return result;
}
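
/* Usage sketch (not part of the driver): the MTU change above is
 * normally driven from user space, e.g.:
 *
 *	ip link set dev eth0 mtu 9216
 *
 * which reaches this function via the ndo_change_mtu hook below;
 * 9216 + 14 = 9230 bytes is then programmed as the jumbo-packet size.
 */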

static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open = et131x_open,
	.ndo_stop = et131x_close,
	.ndo_start_xmit = et131x_tx,
	.ndo_set_rx_mode = et131x_multicast,
	.ndo_tx_timeout = et131x_tx_timeout,
	.ndo_change_mtu = et131x_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_get_stats = et131x_stats,
	.ndo_do_ioctl = et131x_ioctl,
};

static int et131x_pci_setup(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct et131x_adapter *adapter;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		rc = -ENODEV;
		goto err_disable;
	}

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Check the DMA addressing support of this device */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_release_res;
	}

	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		rc = -ENOMEM;
		goto err_release_res;
	}

	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops = &et131x_netdev_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	netdev->ethtool_ops = &et131x_ethtool_ops;

	adapter = et131x_adapter_init(netdev, pdev);

	rc = et131x_pci_init(adapter, pdev);
	if (rc < 0)
		goto err_free_dev;

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		rc = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	et131x_soft_reset(adapter);
	et131x_disable_interrupts(adapter);

	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	et131x_init_send(adapter);

	netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);

	ether_addr_copy(netdev->dev_addr, adapter->addr);

	rc = -ENOMEM;

	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	et131x_adapter_setup(adapter);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	/* We can enable interrupts now
	 *
	 * NOTE - Because registration of interrupt handler is done in the
	 * device's open(), defer enabling device interrupts to that
	 * point
	 */

	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Stash the net_device in the PCI driver data, so that the remove
	 * and suspend/resume callbacks can retrieve it.
	 */
	pci_set_drvdata(pdev, netdev);
out:
	return rc;

err_phy_disconnect:
	phy_disconnect(adapter->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}

static const struct pci_device_id et131x_pci_table[] = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name = DRIVER_NAME,
	.id_table = et131x_pci_table,
	.probe = et131x_pci_setup,
	.remove = et131x_pci_remove,
	.driver.pm = &et131x_pm_ops,
};

module_pci_driver(et131x_driver);