/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/*
 * For interrupts, normal running is:
 *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *       watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7
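
/* Not part of the original driver -- an illustrative helper making the
 * relationship above explicit: when Tx or bi-directional flow control is
 * enabled, the mask that also unmasks rx_fbr0_low/rx_fbr1_low is used.
 */
static inline u32 et131x_int_mask_sketch(bool tx_flow_control)
{
	if (tx_flow_control)
		return INT_MASK_ENABLE;
	return INT_MASK_ENABLE_NO_FLOW;
}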

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define fMP_DEST_MULTI			0x00000001
#define fMP_DEST_BROAD			0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER		0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define USE_FBR0	1
#define FBR_CHUNKS	32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2
#else
#define RFD_LOW_WATER_MARK	20
#define NIC_DEFAULT_NUM_RFD	256
#define NUM_FBRS		1
#endif

#define NIC_MIN_NUM_RFD		64
#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};
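
/* Not part of the original driver -- an illustrative sketch of how one free
 * buffer descriptor would be filled in, assuming word2 carries the 0-9
 * descriptor index noted above and addr_hi/addr_lo hold the halves of the
 * receive buffer's DMA address.
 */
static inline void fbr_desc_fill_sketch(struct fbr_desc *fbd, u32 bus_high,
					u32 bus_low, u32 index)
{
	fbd->addr_hi = bus_high;
	fbd->addr_lo = bus_low;
	fbd->word2 = index & 0x3FF;	/* bits 10-31 remain reserved (zero) */
}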

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};
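
/* Not part of the original driver -- illustrative helpers showing how the
 * word 1 fields documented above would be unpacked (bits 0-15 length,
 * 16-25 buffer index, 26-27 ring index).
 */
static inline u32 psr_length(const struct pkt_stat_desc *psd)
{
	return psd->word1 & 0xFFFF;
}

static inline u32 psr_buffer_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 16) & 0x3FF;
}

static inline u32 psr_ring_index(const struct pkt_stat_desc *psd)
{
	return (psd->word1 >> 26) & 0x3;
}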

/* Typedefs for the RX DMA status word */

/*
 * rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/*
 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/*
 * struct rx_status_block is a structure representing the status of the Rx
 * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};
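
/* Not part of the original driver -- illustrative helpers pulling the free
 * buffer ring and packet status ring offsets out of the status block,
 * following the word 0 / word 1 bit layouts described above.
 */
static inline u32 rx_status_fbr1_offset(const struct rx_status_block *s)
{
	return s->word0 & 0x3FF;		/* bits 0-9 */
}

static inline u32 rx_status_fbr0_offset(const struct rx_status_block *s)
{
	return (s->word0 >> 16) & 0x3FF;	/* bits 16-25 */
}

static inline u32 rx_status_psr_offset(const struct rx_status_block *s)
{
	return (s->word1 >> 16) & 0xFFF;	/* bits 16-27 */
}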

/*
 * Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void		*virt[MAX_DESC_PER_RING_RX];
	void		*buffer1[MAX_DESC_PER_RING_RX];
	void		*buffer2[MAX_DESC_PER_RING_RX];
	u32		 bus_high[MAX_DESC_PER_RING_RX];
	u32		 bus_low[MAX_DESC_PER_RING_RX];
	void		*ring_virtaddr;
	dma_addr_t	 ring_physaddr;
	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u64		 real_physaddr;
	u64		 offset;
	u32		 local_full;
	u32		 num_entries;
	u32		 buffsize;
};

/*
 * struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 *
 ******************************************************************************
 * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
 *			and index 1 to refer to FBR0
 ******************************************************************************
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;

	/* lookaside lists */
	struct kmem_cache *recv_lookaside;
};

/* TX defines */
/*
 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};
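
/* Not part of the original driver -- a sketch of filling one Tx descriptor
 * for a single-fragment frame, using the word 2 / word 3 layout documented
 * above (flags bit 0 = last, bit 1 = first, bit 2 = interrupt on completion).
 */
static inline void tx_desc_fill_sketch(struct tx_desc *desc, dma_addr_t dma,
				       u32 len)
{
	desc->addr_hi = upper_32_bits(dma);
	desc->addr_lo = lower_32_bits(dma);
	desc->len_vlan = len & 0xFFFF;	/* bits 0-15: packet length */
	desc->flags = 0x7;		/* first + last + interrupt */
}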

/*
 * The status of the Tx DMA engine; it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 flags;		/* Our flags for the packet */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock. This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 * tail
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};

/*
 * Do not change these values: if changed, then change also in respective
 * TxDMA and RxDMA engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* Struct to define some device statistics */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32		unicast_pkts_rcvd;
	atomic_t	unicast_pkts_xmtd;
	u32		multicast_pkts_rcvd;
	atomic_t	multicast_pkts_xmtd;
	u32		broadcast_pkts_rcvd;
	atomic_t	broadcast_pkts_xmtd;
	u32		rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32		tx_underflows;

	u32		tx_collisions;
	u32		tx_excessive_collisions;
	u32		tx_first_collisions;
	u32		tx_late_collisions;
	u32		tx_max_pkt_errs;
	u32		tx_deferred;

	/* Rx Statistics. */
	u32		rx_overflows;

	u32		rx_length_errs;
	u32		rx_align_errs;
	u32		rx_crc_errs;
	u32		rx_code_violations;
	u32		rx_other_errs;

	u32		synchronous_iterations;
	u32		interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	/* Spinlocks */
	spinlock_t lock;

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t rcv_pend_lock;
	spinlock_t fbr_lock;

	spinlock_t phy_lock;

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* Used to put the PHY into coma mode when booting up with no cable
	 * plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;

	struct net_device_stats net_stats;
};

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/*
	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7,1:0 both equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}


/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write, otherwise a negative errno.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bits 1:0 of the LBCIF Status Register.  When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting for both
		 * to equal 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
		 * an error has occurred.  Don't break here if we are revision
		 * 1, this is so we do a blind write to work around the load
		 * bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 * repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 on success, otherwise a negative errno.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/*
	 * Write to the LBCIF Control Register:  bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0.  Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register.  When = 1, I2C read
	 * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register.  If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
			     &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I also tried a msleep before this
	 * function, because I thought there could be some timing conditions,
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = 0x2000;	/* FBR1 enable */

	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
		csr |= 0x0800;
	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
		csr |= 0x1000;
	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
		csr |= 0x1800;
#ifdef USE_FBR0
	csr |= 0x0400;		/* FBR0 enable */
	if (adapter->rx_ring.fbr[1]->buffsize == 256)
		csr |= 0x0100;
	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
		csr |= 0x0200;
	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
		csr |= 0x0300;
#endif
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) != 0) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) != 0) {
			dev_err(&adapter->pdev->dev,
			    "RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;
	/* Setup the receive dma configuration register */
	writel(0x00002001, &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) == 0)
			dev_err(&adapter->pdev->dev,
			    "RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
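
/* Illustrative note, not from the original source: these helpers advance
 * only the low ten (or twelve) bits of *v and preserve whatever wrap flag
 * the caller keeps in ET_DMA10_WRAP / ET_DMA12_WRAP, assuming those marks
 * sit in the bit just above the index (bit 10 / bit 12) as defined in
 * et131x.h.  For example, starting from v = 0x3FF (last entry, wrap clear):
 *
 *	add_10bit(&v, 1);	v is now 0x000; the wrap flag in bit 10 is
 *				left for the caller to manage
 */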

/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything.  Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next let's configure the MAC Inter-packet gap register */
	ipg = 0x38005860;		/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;		/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next let's configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next let's configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next let's configure the MAC Station Address register.  These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure.  We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low.  This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated.  Allow the MAC to pass 4 more than our max packet
	 * size.  This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~0x300;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= 0x200;
		/* Phy mode bit */
		ifctrl &= ~(1 << 24);
	} else {
		cfg2 |= 0x100;
		ifctrl |= (1 << 24);
	}

	/* We need to enable Rx/Tx */
	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
				adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	   full duplex off */
	cfg2 |= 0x7016;
	cfg2 &= ~0x0021;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= 0x01;

	ifctrl &= ~(1 << 26);
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= (1<<26);	/* Enable ghd */

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
		    "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			cfg1);
	}

	/* Enable txmac */
	ctl |= 0x09;	/* TX mac enable, FC disable */
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (!(adapter->flags & fMP_ADAPTER_LOWER_POWER)) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/**
 * et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST.  If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both address
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;

	/* Disable the MAC while it is being configured (also disable WOL) */
	writel(0x8, &rxmac->ctrl);

	/* Initialize WOL to disabled. */
	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	/* We need to set the WOL mask0 - mask4 next.  We initialize it to
	 * its default Values of 0x00000000 because there are no WOL masks
	 * as of this time.
	 */
	writel(0, &rxmac->mask0_word0);
	writel(0, &rxmac->mask0_word1);
	writel(0, &rxmac->mask0_word2);
	writel(0, &rxmac->mask0_word3);

	writel(0, &rxmac->mask1_word0);
	writel(0, &rxmac->mask1_word1);
	writel(0, &rxmac->mask1_word2);
	writel(0, &rxmac->mask1_word3);

	writel(0, &rxmac->mask2_word0);
	writel(0, &rxmac->mask2_word1);
	writel(0, &rxmac->mask2_word2);
	writel(0, &rxmac->mask2_word3);

	writel(0, &rxmac->mask3_word0);
	writel(0, &rxmac->mask3_word1);
	writel(0, &rxmac->mask3_word2);
	writel(0, &rxmac->mask3_word3);

	writel(0, &rxmac->mask4_word0);
	writel(0, &rxmac->mask4_word1);
	writel(0, &rxmac->mask4_word2);
	writel(0, &rxmac->mask4_word3);

	/* Let's set up the WOL Source Address */
	sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
		 adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
		       adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	/* Disable all Packet Filtering */
	writel(0, &rxmac->pf_ctrl);

	/* Let's initialize the Unicast Packet filtering address */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= 4;	/* Unicast filter */
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	/* Let's initialize the Multicast hash */
	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= 2;	/* Multicast filter */
		et1310_setup_device_for_multicast(adapter);
	}

	/* Runt packet filtering.  Didn't work in version A silicon. */
	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
	pf_ctrl |= 8;	/* Fragment filter */

	if (adapter->registry_jumbo_packet > 8192)
		/* In order to transmit jumbo packets greater than 8k, the
		 * FIFO between RxMAC and RxDMA needs to be reduced in size
		 * to (16k - Jumbo packet size).  In order to implement this,
		 * we must use "cut through" mode in the RxMAC, which chops
		 * packets down into segments which are (max_size * 16).  In
		 * this case we selected 256 bytes, since this is the size of
		 * the PCI-Express TLP's that the 1310 uses.
		 *
		 * seg_en on, fc_en off, size 0x10
		 */
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	/* Initialize the MCIF water marks */
	writel(0, &rxmac->mcif_water_mark);

	/* Initialize the MIF control */
	writel(0, &rxmac->mif_ctrl);

	/* Initialize the Space Available Register */
	writel(0, &rxmac->space_avail);

	/* Initialize the mif_ctrl register
	 * bit 3:  Receive code error. One or more nibbles were signaled as
	 *         errors during the reception of the packet.  Clear this
	 *         bit in Gigabit, set it in 100Mbit.  This was derived
	 *         experimentally at UNH.
	 * bit 4:  Receive CRC error. The packet's CRC did not match the
	 *         internally generated CRC.
	 * bit 5:  Receive length check error. Indicates that frame length
	 *         field value in the packet does not match the actual data
	 *         byte length and is not a type field.
	 * bit 16: Receive frame truncated.
	 * bit 17: Drop packet enable
	 */
	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	/* Finally we initialize RxMac to be enabled & WOL disabled.  Packet
	 * filter is always enabled since it is where the runt packets are
	 * supposed to be dropped.  For version A silicon, runt packet
	 * dropping doesn't work, so it is disabled in the pf_ctrl register,
	 * but we still leave the packet filter on.
	 */
	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(0x9, &rxmac->ctrl);
}
| 1315 | |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1316 | static void et1310_config_txmac_regs(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1317 | { |
| 1318 | struct txmac_regs __iomem *txmac = &adapter->regs->txmac; |
| 1319 | |
| 1320 | /* We need to update the Control Frame Parameters |
| 1321 | * cfpt - control frame pause timer set to 64 (0x40) |
| 1322 | * cfep - control frame extended pause timer set to 0x0 |
| 1323 | */ |
| 1324 | if (adapter->flowcontrol == FLOW_NONE) |
| 1325 | writel(0, &txmac->cf_param); |
| 1326 | else |
| 1327 | writel(0x40, &txmac->cf_param); |
| 1328 | } |
| 1329 | |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1330 | static void et1310_config_macstat_regs(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1331 | { |
| 1332 | struct macstat_regs __iomem *macstat = |
| 1333 | &adapter->regs->macstat; |
| 1334 | |
| 1335 | /* Next we need to initialize all the macstat registers to zero on |
| 1336 | * the device. |
| 1337 | */ |
| 1338 | writel(0, &macstat->txrx_0_64_byte_frames); |
| 1339 | writel(0, &macstat->txrx_65_127_byte_frames); |
| 1340 | writel(0, &macstat->txrx_128_255_byte_frames); |
| 1341 | writel(0, &macstat->txrx_256_511_byte_frames); |
| 1342 | writel(0, &macstat->txrx_512_1023_byte_frames); |
| 1343 | writel(0, &macstat->txrx_1024_1518_byte_frames); |
| 1344 | writel(0, &macstat->txrx_1519_1522_gvln_frames); |
| 1345 | |
| 1346 | writel(0, &macstat->rx_bytes); |
| 1347 | writel(0, &macstat->rx_packets); |
| 1348 | writel(0, &macstat->rx_fcs_errs); |
| 1349 | writel(0, &macstat->rx_multicast_packets); |
| 1350 | writel(0, &macstat->rx_broadcast_packets); |
| 1351 | writel(0, &macstat->rx_control_frames); |
| 1352 | writel(0, &macstat->rx_pause_frames); |
| 1353 | writel(0, &macstat->rx_unknown_opcodes); |
| 1354 | writel(0, &macstat->rx_align_errs); |
| 1355 | writel(0, &macstat->rx_frame_len_errs); |
| 1356 | writel(0, &macstat->rx_code_errs); |
| 1357 | writel(0, &macstat->rx_carrier_sense_errs); |
| 1358 | writel(0, &macstat->rx_undersize_packets); |
| 1359 | writel(0, &macstat->rx_oversize_packets); |
| 1360 | writel(0, &macstat->rx_fragment_packets); |
| 1361 | writel(0, &macstat->rx_jabbers); |
| 1362 | writel(0, &macstat->rx_drops); |
| 1363 | |
| 1364 | writel(0, &macstat->tx_bytes); |
| 1365 | writel(0, &macstat->tx_packets); |
| 1366 | writel(0, &macstat->tx_multicast_packets); |
| 1367 | writel(0, &macstat->tx_broadcast_packets); |
| 1368 | writel(0, &macstat->tx_pause_frames); |
| 1369 | writel(0, &macstat->tx_deferred); |
| 1370 | writel(0, &macstat->tx_excessive_deferred); |
| 1371 | writel(0, &macstat->tx_single_collisions); |
| 1372 | writel(0, &macstat->tx_multiple_collisions); |
| 1373 | writel(0, &macstat->tx_late_collisions); |
| 1374 | writel(0, &macstat->tx_excessive_collisions); |
| 1375 | writel(0, &macstat->tx_total_collisions); |
| 1376 | writel(0, &macstat->tx_pause_honored_frames); |
| 1377 | writel(0, &macstat->tx_drops); |
| 1378 | writel(0, &macstat->tx_jabbers); |
| 1379 | writel(0, &macstat->tx_fcs_errs); |
| 1380 | writel(0, &macstat->tx_control_frames); |
| 1381 | writel(0, &macstat->tx_oversize_frames); |
| 1382 | writel(0, &macstat->tx_undersize_frames); |
| 1383 | writel(0, &macstat->tx_fragments); |
| 1384 | writel(0, &macstat->carry_reg1); |
| 1385 | writel(0, &macstat->carry_reg2); |
| 1386 | |
| 1387 | /* Unmask any counters that we want to track the overflow of. |
| 1388 | * Initially this will be all counters. It may become clear later |
| 1389 | * that we do not need to track all counters. |
| 1390 | */ |
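| | /* A cleared bit in these masks is taken to un-mask the carry (overflow) |
| | * interrupt for that counter; the cleared bits here correspond exactly to |
| | * the counters handled in et1310_handle_macstat_interrupt() below. |
| | */ |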
| 1391 | writel(0xFFFFBE32, &macstat->carry_reg1_mask); |
| 1392 | writel(0xFFFE7E8B, &macstat->carry_reg2_mask); |
| 1393 | } |
| 1394 | |
Mark Einon | 2288760e | 2011-10-23 10:22:51 +0100 | [diff] [blame] | 1395 | /** |
| 1396 | * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC |
| 1397 | * @adapter: pointer to our private adapter structure |
| 1398 | * @addr: the address of the transceiver |
| 1399 | * @reg: the register to read |
| 1400 | * @value: pointer to a 16-bit value in which the value will be stored |
| 1401 | * |
| 1402 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 1403 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1404 | static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, |
Mark Einon | 2288760e | 2011-10-23 10:22:51 +0100 | [diff] [blame] | 1405 | u8 reg, u16 *value) |
| 1406 | { |
| 1407 | struct mac_regs __iomem *mac = &adapter->regs->mac; |
| 1408 | int status = 0; |
| 1409 | u32 delay = 0; |
| 1410 | u32 mii_addr; |
| 1411 | u32 mii_cmd; |
| 1412 | u32 mii_indicator; |
| 1413 | |
| 1414 | /* Save a local copy of the registers we are dealing with so we can |
| 1415 | * set them back |
| 1416 | */ |
| 1417 | mii_addr = readl(&mac->mii_mgmt_addr); |
| 1418 | mii_cmd = readl(&mac->mii_mgmt_cmd); |
| 1419 | |
| 1420 | /* Stop the current operation */ |
| 1421 | writel(0, &mac->mii_mgmt_cmd); |
| 1422 | |
| 1423 | /* Set up the register we need to read from on the correct PHY */ |
| 1424 | writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr); |
| 1425 | |
| 1426 | writel(0x1, &mac->mii_mgmt_cmd); |
| 1427 | |
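| | /* Poll the busy/not-valid indication; worst case this waits |
| | * 50 * 50us = 2.5ms before giving up. |
| | */ |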
| 1428 | do { |
| 1429 | udelay(50); |
| 1430 | delay++; |
| 1431 | mii_indicator = readl(&mac->mii_mgmt_indicator); |
| 1432 | } while ((mii_indicator & MGMT_WAIT) && delay < 50); |
| 1433 | |
| 1434 | /* If we hit the max delay, we could not read the register */ |
| 1435 | if (delay == 50) { |
| 1436 | dev_warn(&adapter->pdev->dev, |
| 1437 | "reg 0x%08x could not be read\n", reg); |
| 1438 | dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", |
| 1439 | mii_indicator); |
| 1440 | |
| 1441 | status = -EIO; |
| 1442 | } |
| 1443 | |
| 1444 | /* Read back the value for the caller; on timeout this is whatever the |
| 1445 | * MAC latched, and status has already been set to -EIO above. */ |
| 1446 | *value = readl(&mac->mii_mgmt_stat) & 0xFFFF; |
| 1447 | |
| 1448 | /* Stop the read operation */ |
| 1449 | writel(0, &mac->mii_mgmt_cmd); |
| 1450 | |
| 1451 | /* set the registers we touched back to the state at which we entered |
| 1452 | * this function |
| 1453 | */ |
| 1454 | writel(mii_addr, &mac->mii_mgmt_addr); |
| 1455 | writel(mii_cmd, &mac->mii_mgmt_cmd); |
| 1456 | |
| 1457 | return status; |
| 1458 | } |
| 1459 | |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1460 | static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) |
Mark Einon | 2288760e | 2011-10-23 10:22:51 +0100 | [diff] [blame] | 1461 | { |
| 1462 | struct phy_device *phydev = adapter->phydev; |
| 1463 | |
| 1464 | if (!phydev) |
| 1465 | return -EIO; |
| 1466 | |
| 1467 | return et131x_phy_mii_read(adapter, phydev->addr, reg, value); |
| 1468 | } |
| 1469 | |
| 1470 | /** |
| 1471 | * et131x_mii_write - Write to a PHY register through the MII interface of the MAC |
| 1472 | * @adapter: pointer to our private adapter structure |
| 1473 | * @reg: the register to write to |
| 1474 | * @value: 16-bit value to write |
| 1475 | * |
| 1476 | * FIXME: one caller in netdev still |
| 1477 | * |
| 1478 | * Return 0 on success, errno on failure (as defined in errno.h) |
| 1479 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1480 | static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) |
Mark Einon | 2288760e | 2011-10-23 10:22:51 +0100 | [diff] [blame] | 1481 | { |
| 1482 | struct mac_regs __iomem *mac = &adapter->regs->mac; |
| 1483 | struct phy_device *phydev = adapter->phydev; |
| 1484 | int status = 0; |
| 1485 | u8 addr; |
| 1486 | u32 delay = 0; |
| 1487 | u32 mii_addr; |
| 1488 | u32 mii_cmd; |
| 1489 | u32 mii_indicator; |
| 1490 | |
| 1491 | if (!phydev) |
| 1492 | return -EIO; |
| 1493 | |
| 1494 | addr = phydev->addr; |
| 1495 | |
| 1496 | /* Save a local copy of the registers we are dealing with so we can |
| 1497 | * set them back |
| 1498 | */ |
| 1499 | mii_addr = readl(&mac->mii_mgmt_addr); |
| 1500 | mii_cmd = readl(&mac->mii_mgmt_cmd); |
| 1501 | |
| 1502 | /* Stop the current operation */ |
| 1503 | writel(0, &mac->mii_mgmt_cmd); |
| 1504 | |
| 1505 | /* Set up the register we need to write to on the correct PHY */ |
| 1506 | writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr); |
| 1507 | |
| 1508 | /* Add the value to write to the registers to the mac */ |
| 1509 | writel(value, &mac->mii_mgmt_ctrl); |
| 1510 | |
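| | /* Poll for completion of the write; worst case this waits |
| | * 100 * 50us = 5ms before giving up. |
| | */ |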
| 1511 | do { |
| 1512 | udelay(50); |
| 1513 | delay++; |
| 1514 | mii_indicator = readl(&mac->mii_mgmt_indicator); |
| 1515 | } while ((mii_indicator & MGMT_BUSY) && delay < 100); |
| 1516 | |
| 1517 | /* If we hit the max delay, we could not write the register */ |
| 1518 | if (delay == 100) { |
| 1519 | u16 tmp; |
| 1520 | |
| 1521 | dev_warn(&adapter->pdev->dev, |
| 1522 | "reg 0x%08x could not be written", reg); |
| 1523 | dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", |
| 1524 | mii_indicator); |
| 1525 | dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", |
| 1526 | readl(&mac->mii_mgmt_cmd)); |
| 1527 | |
| 1528 | et131x_mii_read(adapter, reg, &tmp); |
| 1529 | |
| 1530 | status = -EIO; |
| 1531 | } |
| 1532 | /* Stop the write operation */ |
| 1533 | writel(0, &mac->mii_mgmt_cmd); |
| 1534 | |
| 1535 | /* |
| 1536 | * set the registers we touched back to the state at which we entered |
| 1537 | * this function |
| 1538 | */ |
| 1539 | writel(mii_addr, &mac->mii_mgmt_addr); |
| 1540 | writel(mii_cmd, &mac->mii_mgmt_cmd); |
| 1541 | |
| 1542 | return status; |
| 1543 | } |
| 1544 | |
| 1545 | /* Still used from _mac for BIT_READ */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1546 | static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, |
| 1547 | u16 action, u16 regnum, u16 bitnum, |
| 1548 | u8 *value) |
Mark Einon | 2288760e | 2011-10-23 10:22:51 +0100 | [diff] [blame] | 1549 | { |
| 1550 | u16 reg; |
| 1551 | u16 mask = 0x0001 << bitnum; |
| 1552 | |
| 1553 | /* Read the requested register */ |
| 1554 | et131x_mii_read(adapter, regnum, ®); |
| 1555 | |
| 1556 | switch (action) { |
| 1557 | case TRUEPHY_BIT_READ: |
| 1558 | *value = (reg & mask) >> bitnum; |
| 1559 | break; |
| 1560 | |
| 1561 | case TRUEPHY_BIT_SET: |
| 1562 | et131x_mii_write(adapter, regnum, reg | mask); |
| 1563 | break; |
| 1564 | |
| 1565 | case TRUEPHY_BIT_CLEAR: |
| 1566 | et131x_mii_write(adapter, regnum, reg & ~mask); |
| 1567 | break; |
| 1568 | |
| 1569 | default: |
| 1570 | break; |
| 1571 | } |
| 1572 | } |
| 1573 | |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1574 | static void et1310_config_flow_control(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1575 | { |
| 1576 | struct phy_device *phydev = adapter->phydev; |
| 1577 | |
| 1578 | if (phydev->duplex == DUPLEX_HALF) { |
| 1579 | adapter->flowcontrol = FLOW_NONE; |
| 1580 | } else { |
| 1581 | char remote_pause, remote_async_pause; |
| 1582 | |
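| | /* MII register 5 is the auto-negotiation link partner ability |
| | * register; bit 10 is the partner's PAUSE bit and bit 11 its |
| | * asymmetric-pause bit. |
| | */ |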
| 1583 | et1310_phy_access_mii_bit(adapter, |
| 1584 | TRUEPHY_BIT_READ, 5, 10, &remote_pause); |
| 1585 | et1310_phy_access_mii_bit(adapter, |
| 1586 | TRUEPHY_BIT_READ, 5, 11, |
| 1587 | &remote_async_pause); |
| 1588 | |
| 1589 | if ((remote_pause == TRUEPHY_BIT_SET) && |
| 1590 | (remote_async_pause == TRUEPHY_BIT_SET)) { |
| 1591 | adapter->flowcontrol = adapter->wanted_flow; |
| 1592 | } else if ((remote_pause == TRUEPHY_BIT_SET) && |
| 1593 | (remote_async_pause == TRUEPHY_BIT_CLEAR)) { |
| 1594 | if (adapter->wanted_flow == FLOW_BOTH) |
| 1595 | adapter->flowcontrol = FLOW_BOTH; |
| 1596 | else |
| 1597 | adapter->flowcontrol = FLOW_NONE; |
| 1598 | } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && |
| 1599 | (remote_async_pause == TRUEPHY_BIT_CLEAR)) { |
| 1600 | adapter->flowcontrol = FLOW_NONE; |
| 1601 | } else {/* if (remote_pause == TRUEPHY_BIT_CLEAR && |
| 1602 | remote_async_pause == TRUEPHY_BIT_SET) */ |
| 1603 | if (adapter->wanted_flow == FLOW_BOTH) |
| 1604 | adapter->flowcontrol = FLOW_RXONLY; |
| 1605 | else |
| 1606 | adapter->flowcontrol = FLOW_NONE; |
| 1607 | } |
| 1608 | } |
| 1609 | } |
| 1610 | |
| 1611 | /** |
| 1612 | * et1310_update_macstat_host_counters - Update the local copy of the statistics |
| 1613 | * @adapter: pointer to the adapter structure |
| 1614 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1615 | static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1616 | { |
| 1617 | struct ce_stats *stats = &adapter->stats; |
| 1618 | struct macstat_regs __iomem *macstat = |
| 1619 | &adapter->regs->macstat; |
| 1620 | |
| 1621 | stats->tx_collisions += readl(&macstat->tx_total_collisions); |
| 1622 | stats->tx_first_collisions += readl(&macstat->tx_single_collisions); |
| 1623 | stats->tx_deferred += readl(&macstat->tx_deferred); |
| 1624 | stats->tx_excessive_collisions += |
| 1625 | readl(&macstat->tx_multiple_collisions); |
| 1626 | stats->tx_late_collisions += readl(&macstat->tx_late_collisions); |
| 1627 | stats->tx_underflows += readl(&macstat->tx_undersize_frames); |
| 1628 | stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); |
| 1629 | |
| 1630 | stats->rx_align_errs += readl(&macstat->rx_align_errs); |
| 1631 | stats->rx_crc_errs += readl(&macstat->rx_code_errs); |
| 1632 | stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); |
| 1633 | stats->rx_overflows += readl(&macstat->rx_oversize_packets); |
| 1634 | stats->rx_code_violations += readl(&macstat->rx_fcs_errs); |
| 1635 | stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); |
| 1636 | stats->rx_other_errs += readl(&macstat->rx_fragment_packets); |
| 1637 | } |
| 1638 | |
| 1639 | /** |
| 1640 | * et1310_handle_macstat_interrupt |
| 1641 | * @adapter: pointer to the adapter structure |
| 1642 | * |
| 1643 | * One of the MACSTAT counters has wrapped. Update the local copy of |
| 1644 | * the statistics held in the adapter structure, checking the "wrap" |
| 1645 | * bit for each counter. |
| 1646 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1647 | static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1648 | { |
| 1649 | u32 carry_reg1; |
| 1650 | u32 carry_reg2; |
| 1651 | |
| 1652 | /* Read the interrupt bits from the register(s). These are Clear On |
| 1653 | * Write. |
| 1654 | */ |
| 1655 | carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); |
| 1656 | carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); |
| 1657 | |
| 1658 | writel(carry_reg1, &adapter->regs->macstat.carry_reg1); |
| 1659 | writel(carry_reg2, &adapter->regs->macstat.carry_reg2); |
| 1660 | |
| 1661 | /* We need to update the host copy of all the MAC_STAT counters. |
| 1662 | * For each counter, check its overflow bit. If the overflow bit is |
| 1663 | * set, then increment the host version of the count by one complete |
| 1664 | * revolution of the counter. This routine is called when the counter |
| 1665 | * block indicates that one of the counters has wrapped. |
| 1666 | */ |
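| | /* COUNTER_WRAP_16_BIT / COUNTER_WRAP_12_BIT are assumed to be one full |
| | * revolution of the 16-bit / 12-bit hardware counters (0x10000 / 0x1000). |
| | */ |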
| 1667 | if (carry_reg1 & (1 << 14)) |
| 1668 | adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; |
| 1669 | if (carry_reg1 & (1 << 8)) |
| 1670 | adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; |
| 1671 | if (carry_reg1 & (1 << 7)) |
| 1672 | adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; |
| 1673 | if (carry_reg1 & (1 << 2)) |
| 1674 | adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; |
| 1675 | if (carry_reg1 & (1 << 6)) |
| 1676 | adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; |
| 1677 | if (carry_reg1 & (1 << 3)) |
| 1678 | adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; |
| 1679 | if (carry_reg1 & (1 << 0)) |
| 1680 | adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; |
| 1681 | if (carry_reg2 & (1 << 16)) |
| 1682 | adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; |
| 1683 | if (carry_reg2 & (1 << 15)) |
| 1684 | adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; |
| 1685 | if (carry_reg2 & (1 << 6)) |
| 1686 | adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; |
| 1687 | if (carry_reg2 & (1 << 8)) |
| 1688 | adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; |
| 1689 | if (carry_reg2 & (1 << 5)) |
| 1690 | adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; |
| 1691 | if (carry_reg2 & (1 << 4)) |
| 1692 | adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; |
| 1693 | if (carry_reg2 & (1 << 2)) |
| 1694 | adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; |
| 1695 | } |
| 1696 | |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1697 | static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1698 | { |
| 1699 | struct net_device *netdev = bus->priv; |
| 1700 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 1701 | u16 value; |
| 1702 | int ret; |
| 1703 | |
| 1704 | ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); |
| 1705 | |
| 1706 | if (ret < 0) |
| 1707 | return ret; |
| 1708 | else |
| 1709 | return value; |
| 1710 | } |
| 1711 | |
joseph daniel | bf3313a | 2012-05-01 00:30:34 +0600 | [diff] [blame] | 1712 | static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, |
| 1713 | int reg, u16 value) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1714 | { |
| 1715 | struct net_device *netdev = bus->priv; |
| 1716 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 1717 | |
| 1718 | return et131x_mii_write(adapter, reg, value); |
| 1719 | } |
| 1720 | |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1721 | static int et131x_mdio_reset(struct mii_bus *bus) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1722 | { |
| 1723 | struct net_device *netdev = bus->priv; |
| 1724 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 1725 | |
| 1726 | et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); |
| 1727 | |
| 1728 | return 0; |
| 1729 | } |
| 1730 | |
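| | /* These three callbacks are assumed to be hooked into the struct mii_bus |
| | * created in the PCI probe path, roughly: |
| | * |
| | * bus->read = et131x_mdio_read; |
| | * bus->write = et131x_mdio_write; |
| | * bus->reset = et131x_mdio_reset; |
| | * |
| | * so that the phylib core drives all PHY access through the MAC's MII |
| | * management interface. |
| | */ |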
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1731 | /** |
| 1732 | * et1310_phy_power_down - PHY power control |
| 1733 | * @adapter: device to control |
| 1734 | * @down: true for off/false for back on |
| 1735 | * |
| 1736 | * one hundred, ten, one thousand megs |
| 1737 | * How would you like to have your LAN accessed |
| 1738 | * Can't you see that this code processed |
| 1739 | * Phy power, phy power.. |
| 1740 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1741 | static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1742 | { |
| 1743 | u16 data; |
| 1744 | |
| 1745 | et131x_mii_read(adapter, MII_BMCR, &data); |
| 1746 | data &= ~BMCR_PDOWN; |
| 1747 | if (down) |
| 1748 | data |= BMCR_PDOWN; |
| 1749 | et131x_mii_write(adapter, MII_BMCR, data); |
| 1750 | } |
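| | |
| | /* et131x_adapter_setup() below calls et1310_phy_power_down(adapter, 0) to |
| | * make sure the PHY is powered up before the transceiver is initialised. |
| | */ |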
| 1751 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1752 | /** |
| 1753 | * et131x_xcvr_init - Init the phy if we are setting it into force mode |
| 1754 | * @adapter: pointer to our private adapter structure |
| 1755 | * |
| 1756 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1757 | static void et131x_xcvr_init(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1758 | { |
| 1759 | u16 imr; |
| 1760 | u16 isr; |
| 1761 | u16 lcr2; |
| 1762 | |
| 1763 | et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr); |
| 1764 | et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr); |
| 1765 | |
| 1766 | /* Set the link status interrupt only. Bad behavior when link status |
| 1767 | * and auto neg are set, we run into a nested interrupt problem |
| 1768 | */ |
Dan Carpenter | b5b86a4 | 2012-06-09 12:17:01 +0300 | [diff] [blame] | 1769 | imr |= (ET_PHY_INT_MASK_AUTONEGSTAT | |
| 1770 | ET_PHY_INT_MASK_LINKSTAT | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1771 | ET_PHY_INT_MASK_ENABLE); |
| 1772 | |
| 1773 | et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr); |
| 1774 | |
| 1775 | /* Set the LED behavior such that LED 1 indicates speed (off = |
| 1776 | * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates |
| 1777 | * link and activity (on for link, blink off for activity). |
| 1778 | * |
| 1779 | * NOTE: Some customizations have been added here for specific |
| 1780 | * vendors; The LED behavior is now determined by vendor data in the |
| 1781 | * EEPROM. However, the above description is the default. |
| 1782 | */ |
| 1783 | if ((adapter->eeprom_data[1] & 0x4) == 0) { |
| 1784 | et131x_mii_read(adapter, PHY_LED_2, &lcr2); |
| 1785 | |
Dan Carpenter | b5b86a4 | 2012-06-09 12:17:01 +0300 | [diff] [blame] | 1786 | lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1787 | lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); |
| 1788 | |
| 1789 | if ((adapter->eeprom_data[1] & 0x8) == 0) |
| 1790 | lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); |
| 1791 | else |
| 1792 | lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); |
| 1793 | |
| 1794 | et131x_mii_write(adapter, PHY_LED_2, lcr2); |
| 1795 | } |
| 1796 | } |
| 1797 | |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1798 | /** |
| 1799 | * et131x_configure_global_regs - configure JAGCore global regs |
| 1800 | * @adapter: pointer to our adapter structure |
| 1801 | * |
| 1802 | * Used to configure the global registers on the JAGCore |
| 1803 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1804 | static void et131x_configure_global_regs(struct et131x_adapter *adapter) |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1805 | { |
| 1806 | struct global_regs __iomem *regs = &adapter->regs->global; |
| 1807 | |
| 1808 | writel(0, ®s->rxq_start_addr); |
| 1809 | writel(INTERNAL_MEM_SIZE - 1, ®s->txq_end_addr); |
| 1810 | |
| 1811 | if (adapter->registry_jumbo_packet < 2048) { |
| 1812 | /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word |
| 1813 | * block of RAM that the driver can split between Tx |
| 1814 | * and Rx as it desires. Our default is to split it |
| 1815 | * 50/50: |
| 1816 | */ |
| 1817 | writel(PARM_RX_MEM_END_DEF, ®s->rxq_end_addr); |
| 1818 | writel(PARM_RX_MEM_END_DEF + 1, ®s->txq_start_addr); |
| 1819 | } else if (adapter->registry_jumbo_packet < 8192) { |
| 1820 | /* For jumbo packets > 2k but < 8k, split 50-50. */ |
| 1821 | writel(INTERNAL_MEM_RX_OFFSET, ®s->rxq_end_addr); |
| 1822 | writel(INTERNAL_MEM_RX_OFFSET + 1, ®s->txq_start_addr); |
| 1823 | } else { |
| 1824 | /* 9216 is the only packet size greater than 8k that |
| 1825 | * is available. The Tx buffer has to be big enough |
| 1826 | * for one whole packet on the Tx side. We'll make |
| 1827 | * the Tx 9408, and give the rest to Rx |
| 1828 | */ |
| 1829 | writel(0x01b3, ®s->rxq_end_addr); |
| 1830 | writel(0x01b4, ®s->txq_start_addr); |
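| | /* Assuming the 1k-word internal RAM uses 16-byte words, entries |
| | * 0x1b4 - 0x3ff give (0x400 - 0x1b4) * 16 = 9408 bytes for Tx. |
| | */ |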
| 1831 | } |
| 1832 | |
| 1833 | /* Initialize the loopback register. Disable all loopbacks. */ |
| 1834 | writel(0, ®s->loopback); |
| 1835 | |
| 1836 | /* MSI Register */ |
| 1837 | writel(0, ®s->msi_config); |
| 1838 | |
| 1839 | /* By default, disable the watchdog timer. It will be enabled when |
| 1840 | * a packet is queued. |
| 1841 | */ |
| 1842 | writel(0, ®s->watchdog_timer); |
| 1843 | } |
| 1844 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 1845 | /** |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1846 | * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence |
| 1847 | * @adapter: pointer to our adapter structure |
| 1848 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1849 | static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1850 | { |
| 1851 | struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; |
| 1852 | struct rx_ring *rx_local = &adapter->rx_ring; |
| 1853 | struct fbr_desc *fbr_entry; |
| 1854 | u32 entry; |
| 1855 | u32 psr_num_des; |
| 1856 | unsigned long flags; |
| 1857 | |
| 1858 | /* Halt RXDMA to perform the reconfigure. */ |
| 1859 | et131x_rx_dma_disable(adapter); |
| 1860 | |
| 1861 | /* Load the completion writeback physical address |
| 1862 | * |
| 1863 | * NOTE : dma_alloc_coherent(), used above to alloc DMA regions, |
| 1864 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses |
| 1865 | * are ever returned, make sure the high part is retrieved here |
| 1866 | * before storing the adjusted address. |
| 1867 | */ |
| 1868 | writel((u32) ((u64)rx_local->rx_status_bus >> 32), |
| 1869 | &rx_dma->dma_wb_base_hi); |
| 1870 | writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo); |
| 1871 | |
| 1872 | memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); |
| 1873 | |
| 1874 | /* Set the address and parameters of the packet status ring into the |
| 1875 | * 1310's registers |
| 1876 | */ |
| 1877 | writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32), |
| 1878 | &rx_dma->psr_base_hi); |
| 1879 | writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo); |
| 1880 | writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des); |
| 1881 | writel(0, &rx_dma->psr_full_offset); |
| 1882 | |
| 1883 | psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF; |
| 1884 | writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, |
| 1885 | &rx_dma->psr_min_des); |
| 1886 | |
| 1887 | spin_lock_irqsave(&adapter->rcv_lock, flags); |
| 1888 | |
| 1889 | /* These local variables track the PSR in the adapter structure */ |
| 1890 | rx_local->local_psr_full = 0; |
| 1891 | |
| 1892 | /* Now's the best time to initialize FBR1 contents */ |
| 1893 | fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr; |
| 1894 | for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) { |
| 1895 | fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry]; |
| 1896 | fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry]; |
| 1897 | fbr_entry->word2 = entry; |
| 1898 | fbr_entry++; |
| 1899 | } |
| 1900 | |
| 1901 | /* Set the address and parameters of Free buffer ring 1 (and 0 if |
| 1902 | * required) into the 1310's registers |
| 1903 | */ |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 1904 | writel((u32) (rx_local->fbr[0]->real_physaddr >> 32), |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1905 | &rx_dma->fbr1_base_hi); |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 1906 | writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo); |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1907 | writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des); |
| 1908 | writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset); |
| 1909 | |
| 1910 | /* This variable tracks the free buffer ring 1 full position, so it |
| 1911 | * has to match the above. |
| 1912 | */ |
| 1913 | rx_local->fbr[0]->local_full = ET_DMA10_WRAP; |
| 1914 | writel( |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 1915 | ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, |
| 1916 | &rx_dma->fbr1_min_des); |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1917 | |
| 1918 | #ifdef USE_FBR0 |
| 1919 | /* Now's the best time to initialize FBR0 contents */ |
| 1920 | fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr; |
| 1921 | for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) { |
| 1922 | fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry]; |
| 1923 | fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry]; |
| 1924 | fbr_entry->word2 = entry; |
| 1925 | fbr_entry++; |
| 1926 | } |
| 1927 | |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 1928 | writel((u32) (rx_local->fbr[1]->real_physaddr >> 32), |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1929 | &rx_dma->fbr0_base_hi); |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 1930 | writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo); |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1931 | writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des); |
| 1932 | writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset); |
| 1933 | |
| 1934 | /* This variable tracks the free buffer ring 0 full position, so it |
| 1935 | * has to match the above. |
| 1936 | */ |
| 1937 | rx_local->fbr[1]->local_full = ET_DMA10_WRAP; |
| 1938 | writel( |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 1939 | ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, |
| 1940 | &rx_dma->fbr0_min_des); |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1941 | #endif |
| 1942 | |
| 1943 | /* Program the number of packets we will receive before generating an |
| 1944 | * interrupt. |
| 1945 | * For version B silicon, this value gets updated once autoneg is |
| 1946 | * complete. |
| 1947 | */ |
| 1948 | writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); |
| 1949 | |
| 1950 | /* The "time_done" is not working correctly to coalesce interrupts |
| 1951 | * after a given time period, but rather is giving us an interrupt |
| 1952 | * regardless of whether we have received packets. |
| 1953 | * This value gets updated once autoneg is complete. |
| 1954 | */ |
| 1955 | writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); |
| 1956 | |
| 1957 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 1958 | } |
| 1959 | |
| 1960 | /** |
| 1961 | * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. |
| 1962 | * @adapter: pointer to our private adapter structure |
| 1963 | * |
| 1964 | * Configure the transmit engine with the ring buffers we have created |
| 1965 | * and prepare it for use. |
| 1966 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1967 | static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1968 | { |
| 1969 | struct txdma_regs __iomem *txdma = &adapter->regs->txdma; |
| 1970 | |
| 1971 | /* Load the hardware with the start of the transmit descriptor ring. */ |
| 1972 | writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32), |
| 1973 | &txdma->pr_base_hi); |
| 1974 | writel((u32) adapter->tx_ring.tx_desc_ring_pa, |
| 1975 | &txdma->pr_base_lo); |
| 1976 | |
| 1977 | /* Initialise the transmit DMA engine */ |
| 1978 | writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); |
| 1979 | |
| 1980 | /* Load the completion writeback physical address */ |
| 1981 | writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32), |
| 1982 | &txdma->dma_wb_base_hi); |
| 1983 | writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo); |
| 1984 | |
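| | /* tx_status is the DMA-coherent location the hardware writes transmit |
| | * completion status back to (the writeback address programmed above); |
| | * clear it before starting. |
| | */ |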
| 1985 | *adapter->tx_ring.tx_status = 0; |
| 1986 | |
| 1987 | writel(0, &txdma->service_request); |
| 1988 | adapter->tx_ring.send_idx = 0; |
| 1989 | } |
| 1990 | |
| 1991 | /** |
| 1992 | * et131x_adapter_setup - Set the adapter up as per cassini+ documentation |
| 1993 | * @adapter: pointer to our private adapter structure |
| 1996 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 1997 | static void et131x_adapter_setup(struct et131x_adapter *adapter) |
Mark Einon | 36f2771 | 2011-10-23 10:22:48 +0100 | [diff] [blame] | 1998 | { |
| 1999 | /* Configure the JAGCore */ |
| 2000 | et131x_configure_global_regs(adapter); |
| 2001 | |
| 2002 | et1310_config_mac_regs1(adapter); |
| 2003 | |
| 2004 | /* Configure the MMC registers */ |
| 2005 | /* All we need to do is initialize the Memory Control Register */ |
| 2006 | writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); |
| 2007 | |
| 2008 | et1310_config_rxmac_regs(adapter); |
| 2009 | et1310_config_txmac_regs(adapter); |
| 2010 | |
| 2011 | et131x_config_rx_dma_regs(adapter); |
| 2012 | et131x_config_tx_dma_regs(adapter); |
| 2013 | |
| 2014 | et1310_config_macstat_regs(adapter); |
| 2015 | |
| 2016 | et1310_phy_power_down(adapter, 0); |
| 2017 | et131x_xcvr_init(adapter); |
| 2018 | } |
| 2019 | |
| 2020 | /** |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 2021 | * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310 |
| 2022 | * @adapter: pointer to our private adapter structure |
| 2023 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2024 | static void et131x_soft_reset(struct et131x_adapter *adapter) |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 2025 | { |
| 2026 | /* Disable MAC Core */ |
| 2027 | writel(0xc00f0000, &adapter->regs->mac.cfg1); |
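| | /* 0xc00f0000 is assumed to assert the MAC soft/sim reset bits (31, 30) |
| | * together with the Rx/Tx MC and Rx/Tx function reset bits (19-16); |
| | * 0x000f0000 below keeps only the latter asserted before the final |
| | * write of zero releases everything. |
| | */ |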
| 2028 | |
| 2029 | /* Set everything to a reset value */ |
| 2030 | writel(0x7F, &adapter->regs->global.sw_reset); |
| 2031 | writel(0x000f0000, &adapter->regs->mac.cfg1); |
| 2032 | writel(0x00000000, &adapter->regs->mac.cfg1); |
| 2033 | } |
| 2034 | |
| 2035 | /** |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 2036 | * et131x_enable_interrupts - enable interrupts |
| 2037 | * @adapter: et131x device |
| 2038 | * |
| 2039 | * Enable the appropriate interrupts on the ET131x according to our |
| 2040 | * configuration |
| 2041 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2042 | static void et131x_enable_interrupts(struct et131x_adapter *adapter) |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 2043 | { |
| 2044 | u32 mask; |
| 2045 | |
| 2046 | /* Enable the global interrupts, choosing the mask based on flow control */ |
| 2047 | if (adapter->flowcontrol == FLOW_TXONLY || |
| 2048 | adapter->flowcontrol == FLOW_BOTH) |
| 2049 | mask = INT_MASK_ENABLE; |
| 2050 | else |
| 2051 | mask = INT_MASK_ENABLE_NO_FLOW; |
| 2052 | |
| 2053 | writel(mask, &adapter->regs->global.int_mask); |
| 2054 | } |
| 2055 | |
| 2056 | /** |
| 2057 | * et131x_disable_interrupts - interrupt disable |
| 2058 | * @adapter: et131x device |
| 2059 | * |
| 2060 | * Block all interrupts from the et131x device at the device itself |
| 2061 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2062 | static void et131x_disable_interrupts(struct et131x_adapter *adapter) |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 2063 | { |
| 2064 | /* Disable all global interrupts */ |
| 2065 | writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); |
| 2066 | } |
| 2067 | |
| 2068 | /** |
| 2069 | * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 |
| 2070 | * @adapter: pointer to our adapter structure |
| 2071 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2072 | static void et131x_tx_dma_disable(struct et131x_adapter *adapter) |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 2073 | { |
| 2074 | /* Set up the transmit DMA configuration register */ |
| 2075 | writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT, |
| 2076 | &adapter->regs->txdma.csr); |
| 2077 | } |
| 2078 | |
| 2079 | /** |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 2080 | * et131x_enable_txrx - Enable tx/rx queues |
| 2081 | * @netdev: device to be enabled |
| 2082 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2083 | static void et131x_enable_txrx(struct net_device *netdev) |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 2084 | { |
| 2085 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 2086 | |
| 2087 | /* Enable the Tx and Rx DMA engines (if not already enabled) */ |
| 2088 | et131x_rx_dma_enable(adapter); |
| 2089 | et131x_tx_dma_enable(adapter); |
| 2090 | |
| 2091 | /* Enable device interrupts */ |
| 2092 | if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE) |
| 2093 | et131x_enable_interrupts(adapter); |
| 2094 | |
| 2095 | /* We're ready to move some data, so start the queue */ |
| 2096 | netif_start_queue(netdev); |
| 2097 | } |
| 2098 | |
| 2099 | /** |
| 2100 | * et131x_disable_txrx - Disable tx/rx queues |
| 2101 | * @netdev: device to be disabled |
| 2102 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2103 | static void et131x_disable_txrx(struct net_device *netdev) |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 2104 | { |
| 2105 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 2106 | |
| 2107 | /* First thing is to stop the queue */ |
| 2108 | netif_stop_queue(netdev); |
| 2109 | |
| 2110 | /* Stop the Tx and Rx DMA engines */ |
| 2111 | et131x_rx_dma_disable(adapter); |
| 2112 | et131x_tx_dma_disable(adapter); |
| 2113 | |
| 2114 | /* Disable device interrupts */ |
| 2115 | et131x_disable_interrupts(adapter); |
| 2116 | } |
| 2117 | |
| 2118 | /** |
Mark Einon | 8310c60 | 2011-10-23 10:22:52 +0100 | [diff] [blame] | 2119 | * et131x_init_send - Initialize send data structures |
| 2120 | * @adapter: pointer to our private adapter structure |
| 2121 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2122 | static void et131x_init_send(struct et131x_adapter *adapter) |
Mark Einon | 8310c60 | 2011-10-23 10:22:52 +0100 | [diff] [blame] | 2123 | { |
| 2124 | struct tcb *tcb; |
| 2125 | u32 ct; |
| 2126 | struct tx_ring *tx_ring; |
| 2127 | |
| 2128 | /* Setup some convenience pointers */ |
| 2129 | tx_ring = &adapter->tx_ring; |
| 2130 | tcb = adapter->tx_ring.tcb_ring; |
| 2131 | |
| 2132 | tx_ring->tcb_qhead = tcb; |
| 2133 | |
| 2134 | memset(tcb, 0, sizeof(struct tcb) * NUM_TCB); |
| 2135 | |
| 2136 | /* Go through and set up each TCB */ |
| 2137 | for (ct = 0; ct++ < NUM_TCB; tcb++) |
| 2138 | /* Set the link pointer in HW TCB to the next TCB in the |
| 2139 | * chain |
| 2140 | */ |
| 2141 | tcb->next = tcb + 1; |
| 2142 | |
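| | /* The post-increment in the loop condition above leaves tcb pointing one |
| | * element past the last TCB, hence the step back below before terminating |
| | * the list. |
| | */ |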
| 2143 | /* Set the tail pointer */ |
| 2144 | tcb--; |
| 2145 | tx_ring->tcb_qtail = tcb; |
| 2146 | tcb->next = NULL; |
| 2147 | /* Curr send queue should now be empty */ |
| 2148 | tx_ring->send_head = NULL; |
| 2149 | tx_ring->send_tail = NULL; |
| 2150 | } |
| 2151 | |
| 2152 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2153 | * et1310_enable_phy_coma - called when network cable is unplugged |
| 2154 | * @adapter: pointer to our adapter structure |
| 2155 | * |
| 2156 | * The driver receives a phy status change interrupt while in D0 and checks |
| 2157 | * that phy_status is down. |
| 2158 | * |
| 2159 | * -- gate off JAGCore; |
| 2160 | * -- set gigE PHY in Coma mode |
| 2161 | * -- wake on phy_interrupt; Perform software reset JAGCore, |
| 2162 | * re-initialize jagcore and gigE PHY |
| 2163 | * |
| 2164 | * Add D0-ASPM-PhyLinkDown Support: |
| 2165 | * -- while in D0, when there is a phy_interrupt indicating phy link |
| 2166 | * down status, call the MPSetPhyComa routine to enter this active |
| 2167 | * state power saving mode |
| 2168 | * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt |
| 2169 | * indicating linkup status, call the MPDisablePhyComa routine to |
| 2170 | * restore JAGCore and gigE PHY |
| 2171 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2172 | static void et1310_enable_phy_coma(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2173 | { |
| 2174 | unsigned long flags; |
| 2175 | u32 pmcsr; |
| 2176 | |
| 2177 | pmcsr = readl(&adapter->regs->global.pm_csr); |
| 2178 | |
| 2179 | /* Save the GbE PHY speed and duplex modes. Need to restore this |
| 2180 | * when cable is plugged back in |
| 2181 | */ |
| 2182 | /* |
| 2183 | * TODO - when PM is re-enabled, check if we need to |
| 2184 | * perform a similar task as this - |
| 2185 | * adapter->pdown_speed = adapter->ai_force_speed; |
| 2186 | * adapter->pdown_duplex = adapter->ai_force_duplex; |
| 2187 | */ |
| 2188 | |
| 2189 | /* Stop sending packets. */ |
| 2190 | spin_lock_irqsave(&adapter->send_hw_lock, flags); |
| 2191 | adapter->flags |= fMP_ADAPTER_LOWER_POWER; |
| 2192 | spin_unlock_irqrestore(&adapter->send_hw_lock, flags); |
| 2193 | |
| 2194 | /* Wait for outstanding Receive packets */ |
| 2195 | |
| 2196 | et131x_disable_txrx(adapter->netdev); |
| 2197 | |
| 2198 | /* Gate off JAGCore 3 clock domains */ |
| 2199 | pmcsr &= ~ET_PMCSR_INIT; |
| 2200 | writel(pmcsr, &adapter->regs->global.pm_csr); |
| 2201 | |
| 2202 | /* Program gigE PHY in to Coma mode */ |
| 2203 | pmcsr |= ET_PM_PHY_SW_COMA; |
| 2204 | writel(pmcsr, &adapter->regs->global.pm_csr); |
| 2205 | } |
| 2206 | |
| 2207 | /** |
| 2208 | * et1310_disable_phy_coma - Disable the Phy Coma Mode |
| 2209 | * @adapter: pointer to our adapter structure |
| 2210 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2211 | static void et1310_disable_phy_coma(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2212 | { |
| 2213 | u32 pmcsr; |
| 2214 | |
| 2215 | pmcsr = readl(&adapter->regs->global.pm_csr); |
| 2216 | |
| 2217 | /* Disable phy_sw_coma register and re-enable JAGCore clocks */ |
| 2218 | pmcsr |= ET_PMCSR_INIT; |
| 2219 | pmcsr &= ~ET_PM_PHY_SW_COMA; |
| 2220 | writel(pmcsr, &adapter->regs->global.pm_csr); |
| 2221 | |
| 2222 | /* Restore the GbE PHY speed and duplex modes; |
| 2223 | * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY |
| 2224 | */ |
| 2225 | /* TODO - when PM is re-enabled, check if we need to |
| 2226 | * perform a similar task as this - |
| 2227 | * adapter->ai_force_speed = adapter->pdown_speed; |
| 2228 | * adapter->ai_force_duplex = adapter->pdown_duplex; |
| 2229 | */ |
| 2230 | |
| 2231 | /* Re-initialize the send structures */ |
| 2232 | et131x_init_send(adapter); |
| 2233 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2234 | /* Bring the device back to the state it was during init prior to |
| 2235 | * autonegotiation being complete. This way, when we get the auto-neg |
| 2236 | * complete interrupt, we can complete init by calling et1310_config_mac_regs2(). |
| 2237 | */ |
| 2238 | et131x_soft_reset(adapter); |
| 2239 | |
| 2240 | /* setup et1310 as per the documentation ?? */ |
| 2241 | et131x_adapter_setup(adapter); |
| 2242 | |
| 2243 | /* Allow Tx to restart */ |
| 2244 | adapter->flags &= ~fMP_ADAPTER_LOWER_POWER; |
| 2245 | |
| 2246 | et131x_enable_txrx(adapter->netdev); |
| 2247 | } |
| 2248 | |
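| | /* The free buffer ring offsets are 10-bit indices (ET_DMA10_MASK, assumed |
| | * to be 0x3ff) plus a wrap bit (ET_DMA10_WRAP, assumed 0x400) that toggles |
| | * on every pass around the ring, presumably so the hardware can tell a |
| | * full ring from an empty one. |
| | */ |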
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2249 | static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) |
| 2250 | { |
| 2251 | u32 tmp_free_buff_ring = *free_buff_ring; |
| 2252 | tmp_free_buff_ring++; |
| 2253 | /* This works for all cases where limit < 1024. The limit == 1023 case |
| 2254 | * works because incrementing 1023 gives 1024: the if condition is not |
| 2255 | * taken, but the carry into bit 10 (the wrap bit) toggles the wrap |
| 2256 | * value correctly. */ |
| 2257 | if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { |
| 2258 | tmp_free_buff_ring &= ~ET_DMA10_MASK; |
| 2259 | tmp_free_buff_ring ^= ET_DMA10_WRAP; |
| 2260 | } |
| 2261 | /* For the 1023 case */ |
| 2262 | tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP); |
| 2263 | *free_buff_ring = tmp_free_buff_ring; |
| 2264 | return tmp_free_buff_ring; |
| 2265 | } |
| 2266 | |
| 2267 | /** |
Mark Einon | 44012df | 2011-10-23 10:22:47 +0100 | [diff] [blame] | 2268 | * et131x_align_allocated_memory - Align allocated memory on a given boundary |
| 2269 | * @adapter: pointer to our adapter structure |
| 2270 | * @phys_addr: pointer to Physical address |
| 2271 | * @offset: pointer to the offset variable |
| 2272 | * @mask: correct mask |
| 2273 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2274 | static void et131x_align_allocated_memory(struct et131x_adapter *adapter, |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 2275 | u64 *phys_addr, u64 *offset, |
| 2276 | u64 mask) |
Mark Einon | 44012df | 2011-10-23 10:22:47 +0100 | [diff] [blame] | 2277 | { |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 2278 | u64 new_addr = *phys_addr & ~mask; |
Mark Einon | 44012df | 2011-10-23 10:22:47 +0100 | [diff] [blame] | 2279 | |
| 2280 | *offset = 0; |
| 2281 | |
Mark Einon | 44012df | 2011-10-23 10:22:47 +0100 | [diff] [blame] | 2282 | if (new_addr != *phys_addr) { |
| 2283 | /* Move to next aligned block */ |
| 2284 | new_addr += mask + 1; |
| 2285 | /* Return offset for adjusting virt addr */ |
| 2286 | *offset = new_addr - *phys_addr; |
| 2287 | /* Return new physical address */ |
| 2288 | *phys_addr = new_addr; |
| 2289 | } |
| 2290 | } |
| 2291 | |
| 2292 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2293 | * et131x_rx_dma_memory_alloc |
| 2294 | * @adapter: pointer to our private adapter structure |
| 2295 | * |
| 2296 | * Returns 0 on success and errno on failure (as defined in errno.h) |
| 2297 | * |
| 2298 | * Allocates Free Buffer Ring 1 (always), Free Buffer Ring 0 if USE_FBR0 |
| 2299 | * is defined, and the Packet Status Ring. |
| 2300 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2301 | static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2302 | { |
| 2303 | u32 i, j; |
| 2304 | u32 bufsize; |
| 2305 | u32 pktstat_ringsize, fbr_chunksize; |
| 2306 | struct rx_ring *rx_ring; |
| 2307 | |
| 2308 | /* Setup some convenience pointers */ |
| 2309 | rx_ring = &adapter->rx_ring; |
| 2310 | |
| 2311 | /* Alloc memory for the lookup table */ |
| 2312 | #ifdef USE_FBR0 |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2313 | rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2314 | #endif |
| 2315 | rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2316 | |
| 2317 | /* The first thing we will do is configure the sizes of the buffer |
| 2318 | * rings. These will change based on jumbo packet support. Larger |
| 2319 | * jumbo packets increase the size of each entry in FBR0, and the |
| 2320 | * number of entries in FBR0, while at the same time decreasing the |
| 2321 | * number of entries in FBR1. |
| 2322 | * |
| 2323 | * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 |
| 2324 | * entries are huge in order to accommodate a "jumbo" frame, then it |
| 2325 | * will have fewer entries. Conversely, FBR0 will now be relied upon |
| 2326 | * to carry more "normal" frames, thus its entry size also increases |
| 2327 | * and the number of entries goes up too (since it now carries |
| 2328 | * "small" + "regular" packets). |
| 2329 | * |
| 2330 | * In this scheme, we try to maintain 512 entries between the two |
| 2331 | * rings. Also, FBR1 remains a constant size - when its size doubles |
| 2332 | * the number of entries halves. FBR0 increases in size, however. |
| 2333 | */ |
| 2334 | |
| 2335 | if (adapter->registry_jumbo_packet < 2048) { |
| 2336 | #ifdef USE_FBR0 |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2337 | rx_ring->fbr[1]->buffsize = 256; |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2338 | rx_ring->fbr[1]->num_entries = 512; |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2339 | #endif |
| 2340 | rx_ring->fbr[0]->buffsize = 2048; |
| 2341 | rx_ring->fbr[0]->num_entries = 512; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2342 | } else if (adapter->registry_jumbo_packet < 4096) { |
| 2343 | #ifdef USE_FBR0 |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2344 | rx_ring->fbr[1]->buffsize = 512; |
| 2345 | rx_ring->fbr[1]->num_entries = 1024; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2346 | #endif |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2347 | rx_ring->fbr[0]->buffsize = 4096; |
| 2348 | rx_ring->fbr[0]->num_entries = 512; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2349 | } else { |
| 2350 | #ifdef USE_FBR0 |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2351 | rx_ring->fbr[1]->buffsize = 1024; |
| 2352 | rx_ring->fbr[1]->num_entries = 768; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2353 | #endif |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2354 | rx_ring->fbr[0]->buffsize = 16384; |
| 2355 | rx_ring->fbr[0]->num_entries = 128; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2356 | } |
| 2357 | |
| 2358 | #ifdef USE_FBR0 |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2359 | adapter->rx_ring.psr_num_entries = |
| 2360 | adapter->rx_ring.fbr[1]->num_entries + |
| 2361 | adapter->rx_ring.fbr[0]->num_entries; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2362 | #else |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2363 | adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2364 | #endif |
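| | |
| | /* The packet status ring is sized to the sum of both free buffer ring |
| | * entry counts, presumably so there is one status slot available for |
| | * every free buffer handed to the hardware. |
| | */ |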
| 2365 | |
| 2366 | /* Allocate an area of memory for Free Buffer Ring 1 */ |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2367 | bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + |
| 2368 | 0xfff; |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2369 | rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, |
| 2370 | bufsize, |
| 2371 | &rx_ring->fbr[0]->ring_physaddr, |
| 2372 | GFP_KERNEL); |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2373 | if (!rx_ring->fbr[0]->ring_virtaddr) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2374 | dev_err(&adapter->pdev->dev, |
| 2375 | "Cannot alloc memory for Free Buffer Ring 1\n"); |
| 2376 | return -ENOMEM; |
| 2377 | } |
| 2378 | |
| 2379 | /* Save physical address |
| 2380 | * |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 2381 | * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2382 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses |
| 2383 | * are ever returned, make sure the high part is retrieved here |
| 2384 | * before storing the adjusted address. |
| 2385 | */ |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2386 | rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2387 | |
| 2388 | /* Align Free Buffer Ring 1 on a 4K boundary */ |
| 2389 | et131x_align_allocated_memory(adapter, |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2390 | &rx_ring->fbr[0]->real_physaddr, |
| 2391 | &rx_ring->fbr[0]->offset, 0x0FFF); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2392 | |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2393 | rx_ring->fbr[0]->ring_virtaddr = |
| 2394 | (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr + |
| 2395 | rx_ring->fbr[0]->offset); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2396 | |
| 2397 | #ifdef USE_FBR0 |
| 2398 | /* Allocate an area of memory for Free Buffer Ring 0 */ |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2399 | bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + |
| 2400 | 0xfff; |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2401 | rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2402 | bufsize, |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2403 | &rx_ring->fbr[1]->ring_physaddr, |
| 2404 | GFP_KERNEL); |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2405 | if (!rx_ring->fbr[1]->ring_virtaddr) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2406 | dev_err(&adapter->pdev->dev, |
| 2407 | "Cannot alloc memory for Free Buffer Ring 0\n"); |
| 2408 | return -ENOMEM; |
| 2409 | } |
| 2410 | |
| 2411 | /* Save physical address |
| 2412 | * |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 2413 | * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2414 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses |
| 2415 | * are ever returned, make sure the high part is retrieved here before |
| 2416 | * storing the adjusted address. |
| 2417 | */ |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2418 | rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2419 | |
| 2420 | /* Align Free Buffer Ring 0 on a 4K boundary */ |
| 2421 | et131x_align_allocated_memory(adapter, |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2422 | &rx_ring->fbr[1]->real_physaddr, |
| 2423 | &rx_ring->fbr[1]->offset, 0x0FFF); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2424 | |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2425 | rx_ring->fbr[1]->ring_virtaddr = |
| 2426 | (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr + |
| 2427 | rx_ring->fbr[1]->offset); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2428 | #endif |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2429 | for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) { |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 2430 | u64 fbr1_tmp_physaddr; |
| 2431 | u64 fbr1_offset; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2432 | u32 fbr1_align; |
| 2433 | |
| 2434 | /* This code allocates an area of memory big enough for N |
| 2435 | * free buffers + (buffer_size - 1) so that the buffers can |
| 2436 | * be aligned on 4k boundaries. If each buffer were aligned |
| 2437 | * to a buffer_size boundary, the effect would be to double |
| 2438 | * the size of FBR0. By allocating N buffers at once, we |
| 2439 | * reduce this overhead. |
| 2440 | */ |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2441 | if (rx_ring->fbr[0]->buffsize > 4096) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2442 | fbr1_align = 4096; |
| 2443 | else |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2444 | fbr1_align = rx_ring->fbr[0]->buffsize; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2445 | |
| 2446 | fbr_chunksize = |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2447 | (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1; |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2448 | rx_ring->fbr[0]->mem_virtaddrs[i] = |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2449 | dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2450 | &rx_ring->fbr[0]->mem_physaddrs[i], |
| 2451 | GFP_KERNEL); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2452 | |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2453 | if (!rx_ring->fbr[0]->mem_virtaddrs[i]) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2454 | dev_err(&adapter->pdev->dev, |
| 2455 | "Could not alloc memory\n"); |
| 2456 | return -ENOMEM; |
| 2457 | } |
| 2458 | |
| 2459 | /* See NOTE in "Save Physical Address" comment above */ |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2460 | fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i]; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2461 | |
| 2462 | et131x_align_allocated_memory(adapter, |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2463 | &fbr1_tmp_physaddr, |
| 2464 | &fbr1_offset, (fbr1_align - 1)); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2465 | |
| 2466 | for (j = 0; j < FBR_CHUNKS; j++) { |
| 2467 | u32 index = (i * FBR_CHUNKS) + j; |
| 2468 | |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2469 | /* Save the Virtual address of this index for quick |
| 2470 | * access later |
| 2471 | */ |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2472 | rx_ring->fbr[0]->virt[index] = |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2473 | (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] + |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2474 | (j * rx_ring->fbr[0]->buffsize) + fbr1_offset; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2475 | |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2476 | /* now store the physical address in the descriptor |
| 2477 | * so the device can access it |
| 2478 | */ |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2479 | rx_ring->fbr[0]->bus_high[index] = |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2480 | (u32) (fbr1_tmp_physaddr >> 32); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2481 | rx_ring->fbr[0]->bus_low[index] = |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2482 | (u32) fbr1_tmp_physaddr; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2483 | |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2484 | fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2485 | |
| 2486 | rx_ring->fbr[0]->buffer1[index] = |
| 2487 | rx_ring->fbr[0]->virt[index]; |
| 2488 | rx_ring->fbr[0]->buffer2[index] = |
| 2489 | rx_ring->fbr[0]->virt[index] - 4; |
| 2490 | } |
| 2491 | } |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2492 | |
| 2493 | #ifdef USE_FBR0 |
| 2494 | /* Same for FBR0 (if in use) */ |
| 2495 | for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) { |
Mark Einon | e013471 | 2011-12-06 23:23:10 +0000 | [diff] [blame] | 2496 | u64 fbr0_tmp_physaddr; |
| 2497 | u64 fbr0_offset; |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2498 | |
| 2499 | fbr_chunksize = |
| 2500 | ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1; |
| 2501 | rx_ring->fbr[1]->mem_virtaddrs[i] = |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2502 | dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize, |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2503 | &rx_ring->fbr[1]->mem_physaddrs[i], |
| 2504 | GFP_KERNEL); |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2505 | |
| 2506 | if (!rx_ring->fbr[1]->mem_virtaddrs[i]) { |
| 2507 | dev_err(&adapter->pdev->dev, |
| 2508 | "Could not alloc memory\n"); |
| 2509 | return -ENOMEM; |
| 2510 | } |
| 2511 | |
| 2512 | /* See NOTE in "Save Physical Address" comment above */ |
| 2513 | fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i]; |
| 2514 | |
| 2515 | et131x_align_allocated_memory(adapter, |
| 2516 | &fbr0_tmp_physaddr, |
| 2517 | &fbr0_offset, |
| 2518 | rx_ring->fbr[1]->buffsize - 1); |
| 2519 | |
| 2520 | for (j = 0; j < FBR_CHUNKS; j++) { |
| 2521 | u32 index = (i * FBR_CHUNKS) + j; |
| 2522 | |
| 2523 | rx_ring->fbr[1]->virt[index] = |
| 2524 | (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] + |
| 2525 | (j * rx_ring->fbr[1]->buffsize) + fbr0_offset; |
| 2526 | |
| 2527 | rx_ring->fbr[1]->bus_high[index] = |
| 2528 | (u32) (fbr0_tmp_physaddr >> 32); |
| 2529 | rx_ring->fbr[1]->bus_low[index] = |
| 2530 | (u32) fbr0_tmp_physaddr; |
| 2531 | |
| 2532 | fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize; |
| 2533 | |
| 2534 | rx_ring->fbr[1]->buffer1[index] = |
| 2535 | rx_ring->fbr[1]->virt[index]; |
| 2536 | rx_ring->fbr[1]->buffer2[index] = |
| 2537 | rx_ring->fbr[1]->virt[index] - 4; |
| 2538 | } |
| 2539 | } |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2540 | #endif |
| 2541 | |
| 2542 | /* Allocate an area of memory for FIFO of Packet Status ring entries */ |
| 2543 | pktstat_ringsize = |
| 2544 | sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; |
| 2545 | |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2546 | rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2547 | pktstat_ringsize, |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2548 | &rx_ring->ps_ring_physaddr, |
| 2549 | GFP_KERNEL); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2550 | |
| 2551 | if (!rx_ring->ps_ring_virtaddr) { |
| 2552 | dev_err(&adapter->pdev->dev, |
| 2553 | "Cannot alloc memory for Packet Status Ring\n"); |
| 2554 | return -ENOMEM; |
| 2555 | } |
| 2556 | dev_info(&adapter->pdev->dev, "Packet Status Ring %lx\n", |
| 2557 | (unsigned long) rx_ring->ps_ring_physaddr); |
| 2558 | |
| 2559 | /* |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 2560 | * NOTE : dma_alloc_coherent(), used above to alloc DMA regions, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2561 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses |
| 2562 | * are ever returned, make sure the high part is retrieved here before |
| 2563 | * storing the adjusted address. |
| 2564 | */ |
| 2565 | |
| 2566 | /* Allocate an area of memory for writeback of status information */ |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2567 | rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2568 | sizeof(struct rx_status_block), |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 2569 | &rx_ring->rx_status_bus, |
| 2570 | GFP_KERNEL); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2571 | if (!rx_ring->rx_status_block) { |
| 2572 | dev_err(&adapter->pdev->dev, |
| 2573 | "Cannot alloc memory for Status Block\n"); |
| 2574 | return -ENOMEM; |
| 2575 | } |
| 2576 | rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; |
| 2577 | dev_info(&adapter->pdev->dev, "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus); |
| 2578 | |
| 2579 | /* Recv |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 2580 | * kmem_cache_create initializes a lookaside list. After successful |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2581 | * creation, nonpaged fixed-size blocks can be allocated from and |
| 2582 | * freed to the lookaside list. |
| 2583 | * RFDs will be allocated from this pool. |
| 2584 | */ |
| 2585 | rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name, |
| 2586 | sizeof(struct rfd), |
| 2587 | 0, |
| 2588 | SLAB_CACHE_DMA | |
| 2589 | SLAB_HWCACHE_ALIGN, |
| 2590 | NULL); |
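/* Note: the return value of kmem_cache_create() is not checked here; a NULL
 * check, mirroring the dma_alloc_coherent() error paths above, would be
 * needed for the later kmem_cache_alloc() calls in et131x_init_recv() to be
 * safe if the cache could not be created.
 */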
| 2591 | |
| 2592 | adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE; |
| 2593 | |
| 2594 | /* The RFDs are going to be put on lists later on, so initialize the |
| 2595 | * lists now. |
| 2596 | */ |
| 2597 | INIT_LIST_HEAD(&rx_ring->recv_list); |
| 2598 | return 0; |
| 2599 | } |
| 2600 | |
| 2601 | /** |
| 2602 | * et131x_rx_dma_memory_free - Free all memory allocated within this module. |
| 2603 | * @adapter: pointer to our private adapter structure |
| 2604 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2605 | static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2606 | { |
| 2607 | u32 index; |
| 2608 | u32 bufsize; |
| 2609 | u32 pktstat_ringsize; |
| 2610 | struct rfd *rfd; |
| 2611 | struct rx_ring *rx_ring; |
| 2612 | |
| 2613 | /* Setup some convenience pointers */ |
| 2614 | rx_ring = &adapter->rx_ring; |
| 2615 | |
| 2616 | /* Free RFDs and associated packet descriptors */ |
| 2617 | WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); |
| 2618 | |
| 2619 | while (!list_empty(&rx_ring->recv_list)) { |
| 2620 | rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, |
| 2621 | struct rfd, list_node); |
| 2622 | |
| 2623 | list_del(&rfd->list_node); |
| 2624 | rfd->skb = NULL; |
| 2625 | kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd); |
| 2626 | } |
| 2627 | |
| 2628 | /* Free Free Buffer Ring 1 */ |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2629 | if (rx_ring->fbr[0]->ring_virtaddr) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2630 | /* First the packet memory */ |
| 2631 | for (index = 0; index < |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2632 | (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) { |
| 2633 | if (rx_ring->fbr[0]->mem_virtaddrs[index]) { |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2634 | u32 fbr1_align; |
| 2635 | |
| 2636 | if (rx_ring->fbr[0]->buffsize > 4096) |
| 2637 | fbr1_align = 4096; |
| 2638 | else |
| 2639 | fbr1_align = rx_ring->fbr[0]->buffsize; |
| 2640 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2641 | bufsize = |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2642 | (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) + |
| 2643 | fbr1_align - 1; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2644 | |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 2645 | dma_free_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2646 | bufsize, |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2647 | rx_ring->fbr[0]->mem_virtaddrs[index], |
| 2648 | rx_ring->fbr[0]->mem_physaddrs[index]); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2649 | |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2650 | rx_ring->fbr[0]->mem_virtaddrs[index] = NULL; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2651 | } |
| 2652 | } |
| 2653 | |
| 2654 | /* Now the FIFO itself */ |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2655 | rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *) |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2656 | rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2657 | |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2658 | bufsize = |
| 2659 | (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) + |
| 2660 | 0xfff; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2661 | |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 2662 | dma_free_coherent(&adapter->pdev->dev, bufsize, |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2663 | rx_ring->fbr[0]->ring_virtaddr, |
| 2664 | rx_ring->fbr[0]->ring_physaddr); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2665 | |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2666 | rx_ring->fbr[0]->ring_virtaddr = NULL; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2667 | } |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2668 | |
| 2669 | #ifdef USE_FBR0 |
| 2670 | /* Now the same for Free Buffer Ring 0 */ |
| 2671 | if (rx_ring->fbr[1]->ring_virtaddr) { |
| 2672 | /* First the packet memory */ |
| 2673 | for (index = 0; index < |
| 2674 | (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) { |
| 2675 | if (rx_ring->fbr[1]->mem_virtaddrs[index]) { |
| 2676 | bufsize = |
| 2677 | (rx_ring->fbr[1]->buffsize * |
| 2678 | (FBR_CHUNKS + 1)) - 1; |
| 2679 | |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 2680 | dma_free_coherent(&adapter->pdev->dev, |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2681 | bufsize, |
| 2682 | rx_ring->fbr[1]->mem_virtaddrs[index], |
| 2683 | rx_ring->fbr[1]->mem_physaddrs[index]); |
| 2684 | |
| 2685 | rx_ring->fbr[1]->mem_virtaddrs[index] = NULL; |
| 2686 | } |
| 2687 | } |
| 2688 | |
| 2689 | /* Now the FIFO itself */ |
| 2690 | rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *) |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2691 | rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset); |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2692 | |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2693 | bufsize = |
| 2694 | (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) + |
| 2695 | 0xfff; |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2696 | |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 2697 | dma_free_coherent(&adapter->pdev->dev, |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2698 | bufsize, |
| 2699 | rx_ring->fbr[1]->ring_virtaddr, |
| 2700 | rx_ring->fbr[1]->ring_physaddr); |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2701 | |
| 2702 | rx_ring->fbr[1]->ring_virtaddr = NULL; |
| 2703 | } |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2704 | #endif |
| 2705 | |
| 2706 | /* Free Packet Status Ring */ |
| 2707 | if (rx_ring->ps_ring_virtaddr) { |
| 2708 | pktstat_ringsize = |
| 2709 | sizeof(struct pkt_stat_desc) * |
| 2710 | adapter->rx_ring.psr_num_entries; |
| 2711 | |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 2712 | dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2713 | rx_ring->ps_ring_virtaddr, |
| 2714 | rx_ring->ps_ring_physaddr); |
| 2715 | |
| 2716 | rx_ring->ps_ring_virtaddr = NULL; |
| 2717 | } |
| 2718 | |
| 2719 | /* Free area of memory for the writeback of status information */ |
| 2720 | if (rx_ring->rx_status_block) { |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 2721 | dma_free_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2722 | sizeof(struct rx_status_block), |
| 2723 | rx_ring->rx_status_block, rx_ring->rx_status_bus); |
| 2724 | rx_ring->rx_status_block = NULL; |
| 2725 | } |
| 2726 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2727 | /* Destroy the lookaside (RFD) pool */ |
| 2728 | if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) { |
| 2729 | kmem_cache_destroy(rx_ring->recv_lookaside); |
| 2730 | adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE; |
| 2731 | } |
| 2732 | |
| 2733 | /* Free the FBR Lookup Table */ |
| 2734 | #ifdef USE_FBR0 |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2735 | kfree(rx_ring->fbr[1]); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2736 | #endif |
| 2737 | |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2738 | kfree(rx_ring->fbr[0]); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2739 | |
| 2740 | /* Reset Counters */ |
| 2741 | rx_ring->num_ready_recv = 0; |
| 2742 | } |
| 2743 | |
| 2744 | /** |
| 2745 | * et131x_init_recv - Initialize receive data structures. |
| 2746 | * @adapter: pointer to our private adapter structure |
| 2747 | * |
| 2748 | * Returns 0 on success and errno on failure (as defined in errno.h) |
| 2749 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2750 | static int et131x_init_recv(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2751 | { |
| 2752 | int status = -ENOMEM; |
| 2753 | struct rfd *rfd = NULL; |
| 2754 | u32 rfdct; |
| 2755 | u32 numrfd = 0; |
| 2756 | struct rx_ring *rx_ring; |
| 2757 | |
| 2758 | /* Setup some convenience pointers */ |
| 2759 | rx_ring = &adapter->rx_ring; |
| 2760 | |
| 2761 | /* Setup each RFD */ |
| 2762 | for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { |
| 2763 | rfd = kmem_cache_alloc(rx_ring->recv_lookaside, |
| 2764 | GFP_ATOMIC | GFP_DMA); |
| 2765 | |
| 2766 | if (!rfd) { |
| 2767 | dev_err(&adapter->pdev->dev, |
| 2768 | "Couldn't alloc RFD out of kmem_cache\n"); |
| 2769 | status = -ENOMEM; |
| 2770 | continue; |
| 2771 | } |
| 2772 | |
| 2773 | rfd->skb = NULL; |
| 2774 | |
| 2775 | /* Add this RFD to the recv_list */ |
| 2776 | list_add_tail(&rfd->list_node, &rx_ring->recv_list); |
| 2777 | |
| 2778 | /* Increment both the available RFD count and the total RFD count. */ |
| 2779 | rx_ring->num_ready_recv++; |
| 2780 | numrfd++; |
| 2781 | } |
| 2782 | |
| 2783 | if (numrfd > NIC_MIN_NUM_RFD) |
| 2784 | status = 0; |
| 2785 | |
| 2786 | rx_ring->num_rfd = numrfd; |
| 2787 | |
| 2788 | if (status != 0) { |
| 2789 | kmem_cache_free(rx_ring->recv_lookaside, rfd); |
| 2790 | dev_err(&adapter->pdev->dev, |
| 2791 | "Allocation problems in et131x_init_recv\n"); |
| 2792 | } |
| 2793 | return status; |
| 2794 | } |
| 2795 | |
| 2796 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2797 | * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. |
| 2798 | * @adapter: pointer to our adapter structure |
| 2799 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 2800 | static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2801 | { |
| 2802 | struct phy_device *phydev = adapter->phydev; |
| 2803 | |
| 2804 | if (!phydev) |
| 2805 | return; |
| 2806 | |
| 2807 | /* For version B silicon, we do not use the RxDMA timer for 10 and 100 |
| 2808 | * Mbits/s line rates. We also do not enable RxDMA interrupt coalescing. |
| 2809 | */ |
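/* Judging from the register names, a max_pkt_time of zero disables the
 * packet-deferral timer and a num_pkt_done of one requests a completion
 * after every received packet, i.e. effectively uncoalesced operation at
 * 10/100 Mbit/s.
 */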
| 2810 | if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { |
| 2811 | writel(0, &adapter->regs->rxdma.max_pkt_time); |
| 2812 | writel(1, &adapter->regs->rxdma.num_pkt_done); |
| 2813 | } |
| 2814 | } |
| 2815 | |
| 2816 | /** |
| 2817 | * nic_return_rfd - Recycle an RFD and put it back onto the receive list |
| 2818 | * @adapter: pointer to our adapter |
| 2819 | * @rfd: pointer to the RFD |
| 2820 | */ |
| 2821 | static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) |
| 2822 | { |
| 2823 | struct rx_ring *rx_local = &adapter->rx_ring; |
| 2824 | struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; |
| 2825 | u16 buff_index = rfd->bufferindex; |
| 2826 | u8 ring_index = rfd->ringindex; |
| 2827 | unsigned long flags; |
| 2828 | |
| 2829 | /* We don't use any of the OOB data besides status. Otherwise, we |
| 2830 | * need to clean up OOB data |
| 2831 | */ |
| 2832 | if ( |
| 2833 | #ifdef USE_FBR0 |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2834 | (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) || |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2835 | #endif |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2836 | (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2837 | spin_lock_irqsave(&adapter->fbr_lock, flags); |
| 2838 | |
| 2839 | if (ring_index == 1) { |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2840 | struct fbr_desc *next = (struct fbr_desc *) |
| 2841 | (rx_local->fbr[0]->ring_virtaddr) + |
| 2842 | INDEX10(rx_local->fbr[0]->local_full); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2843 | |
| 2844 | /* Handle the Free Buffer Ring advancement here. Write |
| 2845 | * the PA / Buffer Index for the returned buffer into |
| 2846 | * the oldest (next to be freed) FBR entry |
| 2847 | */ |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2848 | next->addr_hi = rx_local->fbr[0]->bus_high[buff_index]; |
| 2849 | next->addr_lo = rx_local->fbr[0]->bus_low[buff_index]; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2850 | next->word2 = buff_index; |
| 2851 | |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 2852 | writel(bump_free_buff_ring( |
| 2853 | &rx_local->fbr[0]->local_full, |
| 2854 | rx_local->fbr[0]->num_entries - 1), |
| 2855 | &rx_dma->fbr1_full_offset); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2856 | } |
| 2857 | #ifdef USE_FBR0 |
| 2858 | else { |
| 2859 | struct fbr_desc *next = (struct fbr_desc *) |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2860 | rx_local->fbr[1]->ring_virtaddr + |
| 2861 | INDEX10(rx_local->fbr[1]->local_full); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2862 | |
| 2863 | /* Handle the Free Buffer Ring advancement here. Write |
| 2864 | * the PA / Buffer Index for the returned buffer into |
| 2865 | * the oldest (next to be freed) FBR entry |
| 2866 | */ |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2867 | next->addr_hi = rx_local->fbr[1]->bus_high[buff_index]; |
| 2868 | next->addr_lo = rx_local->fbr[1]->bus_low[buff_index]; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2869 | next->word2 = buff_index; |
| 2870 | |
Mark Einon | 6abafc1 | 2011-10-20 01:18:41 +0100 | [diff] [blame] | 2871 | writel(bump_free_buff_ring( |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2872 | &rx_local->fbr[1]->local_full, |
| 2873 | rx_local->fbr[1]->num_entries - 1), |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2874 | &rx_dma->fbr0_full_offset); |
| 2875 | } |
| 2876 | #endif |
| 2877 | spin_unlock_irqrestore(&adapter->fbr_lock, flags); |
| 2878 | } else { |
| 2879 | dev_err(&adapter->pdev->dev, |
| 2880 | "%s illegal Buffer Index returned\n", __func__); |
| 2881 | } |
| 2882 | |
| 2883 | /* The processing on this RFD is done, so put it back on the tail of |
| 2884 | * our list |
| 2885 | */ |
| 2886 | spin_lock_irqsave(&adapter->rcv_lock, flags); |
| 2887 | list_add_tail(&rfd->list_node, &rx_local->recv_list); |
| 2888 | rx_local->num_ready_recv++; |
| 2889 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 2890 | |
| 2891 | WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); |
| 2892 | } |
| 2893 | |
Mark Einon | 54dbf04 | 2011-11-13 19:43:39 +0000 | [diff] [blame] | 2894 | /** |
| 2895 | * nic_rx_pkts - Checks the hardware for available packets |
| 2896 | * @adapter: pointer to our adapter |
| 2897 | * |
| 2898 | * Returns rfd, a pointer to our MPRFD. |
| 2899 | * |
| 2900 | * Checks the hardware for available packets, using the completion ring. |
| 2901 | * If packets are available, it gets an RFD from the recv_list, attaches |
| 2902 | * the packet to it, puts the RFD in the RecvPendList, and also returns |
| 2903 | * the pointer to the RFD. |
| 2904 | */ |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2905 | static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) |
| 2906 | { |
| 2907 | struct rx_ring *rx_local = &adapter->rx_ring; |
| 2908 | struct rx_status_block *status; |
| 2909 | struct pkt_stat_desc *psr; |
| 2910 | struct rfd *rfd; |
| 2911 | u32 i; |
| 2912 | u8 *buf; |
| 2913 | unsigned long flags; |
| 2914 | struct list_head *element; |
| 2915 | u8 ring_index; |
| 2916 | u16 buff_index; |
| 2917 | u32 len; |
| 2918 | u32 word0; |
| 2919 | u32 word1; |
| 2920 | |
| 2921 | /* RX Status block is written by the DMA engine prior to every |
| 2922 | * interrupt. It contains the next to be used entry in the Packet |
| 2923 | * Status Ring, and also the two Free Buffer rings. |
| 2924 | */ |
| 2925 | status = rx_local->rx_status_block; |
| 2926 | word1 = status->word1 >> 16; /* Get the useful bits */ |
| 2927 | |
| 2928 | /* Check the PSR and wrap bits do not match */ |
| 2929 | if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) |
| 2930 | /* Looks like this ring is not updated yet */ |
| 2931 | return NULL; |
| 2932 | |
| 2933 | /* The packet status ring indicates that data is available. */ |
| 2934 | psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) + |
| 2935 | (rx_local->local_psr_full & 0xFFF); |
| 2936 | |
| 2937 | /* Grab any information that is required once the PSR is |
| 2938 | * advanced, since we can no longer rely on the memory being |
| 2939 | * accurate |
| 2940 | */ |
| 2941 | len = psr->word1 & 0xFFFF; |
| 2942 | ring_index = (psr->word1 >> 26) & 0x03; |
| 2943 | buff_index = (psr->word1 >> 16) & 0x3FF; |
| 2944 | word0 = psr->word0; |
| 2945 | |
| 2946 | /* Indicate that we have used this PSR entry. */ |
| 2947 | /* FIXME wrap 12 */ |
| 2948 | add_12bit(&rx_local->local_psr_full, 1); |
| 2949 | if ( |
| 2950 | (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) { |
| 2951 | /* Clear psr full and toggle the wrap bit */ |
| 2952 | rx_local->local_psr_full &= ~0xFFF; |
| 2953 | rx_local->local_psr_full ^= 0x1000; |
| 2954 | } |
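/* local_psr_full holds a 12-bit ring index plus a wrap bit in bit 12. The
 * wrap bit is toggled on every rollover so that, when the hardware and
 * software indices compare equal (as checked at the top of this function),
 * an empty ring can be told apart from a full one.
 */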
| 2955 | |
| 2956 | writel(rx_local->local_psr_full, |
| 2957 | &adapter->regs->rxdma.psr_full_offset); |
| 2958 | |
| 2959 | #ifndef USE_FBR0 |
| 2960 | if (ring_index != 1) |
| 2961 | return NULL; |
| 2962 | #endif |
| 2963 | |
| 2964 | #ifdef USE_FBR0 |
| 2965 | if (ring_index > 1 || |
| 2966 | (ring_index == 0 && |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 2967 | buff_index > rx_local->fbr[1]->num_entries - 1) || |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2968 | (ring_index == 1 && |
Adnan Ali | 397d3e6 | 2012-05-25 18:56:40 +0100 | [diff] [blame] | 2969 | buff_index > rx_local->fbr[0]->num_entries - 1)) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2970 | #else |
Adnan Ali | 397d3e6 | 2012-05-25 18:56:40 +0100 | [diff] [blame] | 2971 | if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2972 | #endif |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 2973 | /* Illegal buffer or ring index cannot be used by S/W*/ |
| 2974 | dev_err(&adapter->pdev->dev, |
| 2975 | "NICRxPkts PSR Entry %d indicates " |
| 2976 | "length of %d and/or bad bi(%d)\n", |
| 2977 | rx_local->local_psr_full & 0xFFF, |
| 2978 | len, buff_index); |
| 2979 | return NULL; |
| 2980 | } |
| 2981 | |
| 2982 | /* Get and fill the RFD. */ |
| 2983 | spin_lock_irqsave(&adapter->rcv_lock, flags); |
| 2984 | |
| 2985 | rfd = NULL; |
| 2986 | element = rx_local->recv_list.next; |
| 2987 | rfd = (struct rfd *) list_entry(element, struct rfd, list_node); |
| 2988 | |
| 2989 | if (rfd == NULL) { |
| 2990 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 2991 | return NULL; |
| 2992 | } |
| 2993 | |
| 2994 | list_del(&rfd->list_node); |
| 2995 | rx_local->num_ready_recv--; |
| 2996 | |
| 2997 | spin_unlock_irqrestore(&adapter->rcv_lock, flags); |
| 2998 | |
| 2999 | rfd->bufferindex = buff_index; |
| 3000 | rfd->ringindex = ring_index; |
| 3001 | |
| 3002 | /* In V1 silicon, there is a bug which screws up filtering of |
| 3003 | * runt packets. Therefore runt packet filtering is disabled |
| 3004 | * in the MAC and the packets are dropped here. They are |
| 3005 | * also counted here. |
| 3006 | */ |
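/* The length reported by the PSR appears to include the 4-byte FCS, hence
 * the NIC_MIN_PACKET_SIZE + 4 comparison below.
 */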
| 3007 | if (len < (NIC_MIN_PACKET_SIZE + 4)) { |
| 3008 | adapter->stats.rx_other_errs++; |
| 3009 | len = 0; |
| 3010 | } |
| 3011 | |
| 3012 | if (len) { |
| 3013 | /* Determine if this is a multicast packet coming in */ |
| 3014 | if ((word0 & ALCATEL_MULTICAST_PKT) && |
| 3015 | !(word0 & ALCATEL_BROADCAST_PKT)) { |
| 3016 | /* Promiscuous mode and Multicast mode are |
| 3017 | * not mutually exclusive as was first |
| 3018 | * thought. I guess Promiscuous is just |
| 3019 | * considered a super-set of the other |
| 3020 | * filters. Generally filter is 0x2b when in |
| 3021 | * promiscuous mode. |
| 3022 | */ |
| 3023 | if ((adapter->packet_filter & |
| 3024 | ET131X_PACKET_TYPE_MULTICAST) |
| 3025 | && !(adapter->packet_filter & |
| 3026 | ET131X_PACKET_TYPE_PROMISCUOUS) |
| 3027 | && !(adapter->packet_filter & |
| 3028 | ET131X_PACKET_TYPE_ALL_MULTICAST)) { |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 3029 | /* |
| 3030 | * Note - ring_index for fbr[] array is reversed |
| 3031 | * 1 for FBR0 etc |
| 3032 | */ |
| 3033 | buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]-> |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3034 | virt[buff_index]; |
| 3035 | |
| 3036 | /* Loop through our list to see if the |
| 3037 | * destination address of this packet |
| 3038 | * matches one in our list. |
| 3039 | */ |
| 3040 | for (i = 0; i < adapter->multicast_addr_count; |
| 3041 | i++) { |
| 3042 | if (buf[0] == |
| 3043 | adapter->multicast_list[i][0] |
| 3044 | && buf[1] == |
| 3045 | adapter->multicast_list[i][1] |
| 3046 | && buf[2] == |
| 3047 | adapter->multicast_list[i][2] |
| 3048 | && buf[3] == |
| 3049 | adapter->multicast_list[i][3] |
| 3050 | && buf[4] == |
| 3051 | adapter->multicast_list[i][4] |
| 3052 | && buf[5] == |
| 3053 | adapter->multicast_list[i][5]) { |
| 3054 | break; |
| 3055 | } |
| 3056 | } |
| 3057 | |
| 3058 | /* If our index is equal to the number |
| 3059 | * of Multicast address we have, then |
| 3060 | * this means we did not find this |
| 3061 | * packet's matching address in our |
| 3062 | * list. Set the len to zero, |
| 3063 | * so we free our RFD when we return |
| 3064 | * from this function. |
| 3065 | */ |
| 3066 | if (i == adapter->multicast_addr_count) |
| 3067 | len = 0; |
| 3068 | } |
| 3069 | |
| 3070 | if (len > 0) |
| 3071 | adapter->stats.multicast_pkts_rcvd++; |
| 3072 | } else if (word0 & ALCATEL_BROADCAST_PKT) |
| 3073 | adapter->stats.broadcast_pkts_rcvd++; |
| 3074 | else |
| 3075 | /* Not sure what this counter measures in |
| 3076 | * promiscuous mode. Perhaps we should check |
| 3077 | * the MAC address to see if it is directed |
| 3078 | * to us in promiscuous mode. |
| 3079 | */ |
| 3080 | adapter->stats.unicast_pkts_rcvd++; |
| 3081 | } |
| 3082 | |
| 3083 | if (len > 0) { |
| 3084 | struct sk_buff *skb = NULL; |
| 3085 | |
| 3086 | /*rfd->len = len - 4; */ |
| 3087 | rfd->len = len; |
| 3088 | |
| 3089 | skb = dev_alloc_skb(rfd->len + 2); |
| 3090 | if (!skb) { |
| 3091 | dev_err(&adapter->pdev->dev, |
| 3092 | "Couldn't alloc an SKB for Rx\n"); |
| 3093 | return NULL; |
| 3094 | } |
| 3095 | |
| 3096 | adapter->net_stats.rx_bytes += rfd->len; |
| 3097 | |
Mark Einon | e592a9b | 2011-10-20 01:18:42 +0100 | [diff] [blame] | 3098 | /* |
| 3099 | * Note - ring_index for fbr[] array is reversed, |
| 3100 | * 1 for FBR0 etc |
| 3101 | */ |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3102 | memcpy(skb_put(skb, rfd->len), |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 3103 | rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index], |
| 3104 | rfd->len); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3105 | |
| 3106 | skb->dev = adapter->netdev; |
| 3107 | skb->protocol = eth_type_trans(skb, adapter->netdev); |
| 3108 | skb->ip_summed = CHECKSUM_NONE; |
| 3109 | |
Mark Einon | fc7e2a5 | 2012-02-20 22:33:24 +0000 | [diff] [blame] | 3110 | netif_rx_ni(skb); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3111 | } else { |
| 3112 | rfd->len = 0; |
| 3113 | } |
| 3114 | |
| 3115 | nic_return_rfd(adapter, rfd); |
| 3116 | return rfd; |
| 3117 | } |
| 3118 | |
| 3119 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3120 | * et131x_handle_recv_interrupt - Interrupt handler for receive processing |
| 3121 | * @adapter: pointer to our adapter |
| 3122 | * |
| 3123 | * Assumption: Rcv spinlock has been acquired. |
| 3124 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3125 | static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3126 | { |
| 3127 | struct rfd *rfd = NULL; |
| 3128 | u32 count = 0; |
| 3129 | bool done = true; |
| 3130 | |
| 3131 | /* Process up to available RFD's */ |
| 3132 | while (count < NUM_PACKETS_HANDLED) { |
| 3133 | if (list_empty(&adapter->rx_ring.recv_list)) { |
| 3134 | WARN_ON(adapter->rx_ring.num_ready_recv != 0); |
| 3135 | done = false; |
| 3136 | break; |
| 3137 | } |
| 3138 | |
| 3139 | rfd = nic_rx_pkts(adapter); |
| 3140 | |
| 3141 | if (rfd == NULL) |
| 3142 | break; |
| 3143 | |
| 3144 | /* Do not receive any packets until a filter has been set. |
| 3145 | * Do not receive any packets until we have link. |
| 3146 | * If length is zero, return the RFD in order to advance the |
| 3147 | * Free buffer ring. |
| 3148 | */ |
| 3149 | if (!adapter->packet_filter || |
| 3150 | !netif_carrier_ok(adapter->netdev) || |
| 3151 | rfd->len == 0) |
| 3152 | continue; |
| 3153 | |
| 3154 | /* Increment the number of packets we received */ |
| 3155 | adapter->net_stats.rx_packets++; |
| 3156 | |
| 3157 | /* Set the status on the packet, either resources or success */ |
| 3158 | if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) { |
| 3159 | dev_warn(&adapter->pdev->dev, |
| 3160 | "RFD's are running out\n"); |
| 3161 | } |
| 3162 | count++; |
| 3163 | } |
| 3164 | |
| 3165 | if (count == NUM_PACKETS_HANDLED || !done) { |
| 3166 | adapter->rx_ring.unfinished_receives = true; |
| 3167 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, |
| 3168 | &adapter->regs->global.watchdog_timer); |
| 3169 | } else |
| 3170 | /* Watchdog timer will disable itself if appropriate. */ |
| 3171 | adapter->rx_ring.unfinished_receives = false; |
| 3172 | } |
| 3173 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3174 | /** |
| 3175 | * et131x_tx_dma_memory_alloc - Allocate Tx DMA resources |
| 3176 | * @adapter: pointer to our private adapter structure |
| 3177 | * |
| 3178 | * Returns 0 on success and errno on failure (as defined in errno.h). |
| 3179 | * |
| 3180 | * Allocates memory that will be visible both to the device and to the CPU. |
| 3181 | * The OS will pass us packets, pointers to which we will insert in the Tx |
| 3182 | * Descriptor queue. The device will read this queue to find the packets in |
| 3183 | * memory. The device will update the "status" in memory each time it xmits a |
| 3184 | * packet. |
| 3185 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3186 | static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3187 | { |
| 3188 | int desc_size = 0; |
| 3189 | struct tx_ring *tx_ring = &adapter->tx_ring; |
| 3190 | |
| 3191 | /* Allocate memory for the TCB's (Transmit Control Block) */ |
| 3192 | adapter->tx_ring.tcb_ring = |
| 3193 | kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA); |
| 3194 | if (!adapter->tx_ring.tcb_ring) { |
| 3195 | dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n"); |
| 3196 | return -ENOMEM; |
| 3197 | } |
| 3198 | |
| 3199 | /* Allocate enough memory for the Tx descriptor ring, and allocate |
| 3200 | * some extra so that the ring can be aligned on a 4k boundary. |
| 3201 | */ |
| 3202 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; |
| 3203 | tx_ring->tx_desc_ring = |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 3204 | (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev, |
| 3205 | desc_size, |
| 3206 | &tx_ring->tx_desc_ring_pa, |
| 3207 | GFP_KERNEL); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3208 | if (!adapter->tx_ring.tx_desc_ring) { |
| 3209 | dev_err(&adapter->pdev->dev, |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 3210 | "Cannot alloc memory for Tx Ring\n"); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3211 | return -ENOMEM; |
| 3212 | } |
| 3213 | |
| 3214 | /* Save physical address |
| 3215 | * |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3216 | * NOTE: dma_alloc_coherent(), used above to alloc DMA regions, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3217 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses |
| 3218 | * are ever returned, make sure the high part is retrieved here before |
| 3219 | * storing the adjusted address. |
| 3220 | */ |
| 3221 | /* Allocate memory for the Tx status block */ |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 3222 | tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3223 | sizeof(u32), |
Mark Einon | 0d1b7a8 | 2011-10-20 01:18:43 +0100 | [diff] [blame] | 3224 | &tx_ring->tx_status_pa, |
| 3225 | GFP_KERNEL); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3226 | if (!adapter->tx_ring.tx_status) { |
| 3227 | dev_err(&adapter->pdev->dev, |
| 3228 | "Cannot alloc memory for Tx status block\n"); |
| 3229 | return -ENOMEM; |
| 3230 | } |
| 3231 | return 0; |
| 3232 | } |
| 3233 | |
| 3234 | /** |
| 3235 | * et131x_tx_dma_memory_free - Free all memory allocated within this module |
| 3236 | * @adapter: pointer to our private adapter structure |
| 3239 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3240 | static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3241 | { |
| 3242 | int desc_size = 0; |
| 3243 | |
| 3244 | if (adapter->tx_ring.tx_desc_ring) { |
| 3245 | /* Free memory relating to Tx rings here */ |
| 3246 | desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) |
| 3247 | + 4096 - 1; |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 3248 | dma_free_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3249 | desc_size, |
| 3250 | adapter->tx_ring.tx_desc_ring, |
| 3251 | adapter->tx_ring.tx_desc_ring_pa); |
| 3252 | adapter->tx_ring.tx_desc_ring = NULL; |
| 3253 | } |
| 3254 | |
| 3255 | /* Free memory for the Tx status block */ |
| 3256 | if (adapter->tx_ring.tx_status) { |
Mark Einon | 675c8f6 | 2011-10-20 01:18:44 +0100 | [diff] [blame] | 3257 | dma_free_coherent(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3258 | sizeof(u32), |
| 3259 | adapter->tx_ring.tx_status, |
| 3260 | adapter->tx_ring.tx_status_pa); |
| 3261 | |
| 3262 | adapter->tx_ring.tx_status = NULL; |
| 3263 | } |
| 3264 | /* Free the memory for the tcb structures */ |
| 3265 | kfree(adapter->tx_ring.tcb_ring); |
| 3266 | } |
| 3267 | |
| 3268 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3269 | * nic_send_packet - NIC specific send handler for version B silicon. |
| 3270 | * @adapter: pointer to our adapter |
| 3271 | * @tcb: pointer to struct tcb |
| 3272 | * |
| 3273 | * Returns 0 or errno. |
| 3274 | */ |
| 3275 | static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) |
| 3276 | { |
| 3277 | u32 i; |
| 3278 | struct tx_desc desc[24]; /* 24 x 16 byte */ |
| 3279 | u32 frag = 0; |
| 3280 | u32 thiscopy, remainder; |
| 3281 | struct sk_buff *skb = tcb->skb; |
| 3282 | u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; |
| 3283 | struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; |
| 3284 | unsigned long flags; |
| 3285 | struct phy_device *phydev = adapter->phydev; |
| 3286 | |
| 3287 | /* Part of the optimizations of this send routine restrict us to |
| 3288 | * sending 24 fragments at a pass. In practice we should never see |
| 3289 | * more than 5 fragments. |
| 3290 | * |
| 3291 | * NOTE: An older version of this function could handle any number of |
| 3292 | * fragments, and could be used instead if needed, although it is |
| 3293 | * less efficient. |
| 3294 | */ |
| 3295 | if (nr_frags > 23) |
| 3296 | return -EIO; |
| 3297 | |
| 3298 | memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); |
| 3299 | |
| 3300 | for (i = 0; i < nr_frags; i++) { |
| 3301 | /* If there is something in this element, lets get a |
| 3302 | * descriptor from the ring and get the necessary data |
| 3303 | */ |
| 3304 | if (i == 0) { |
| 3305 | /* If the fragments are smaller than a standard MTU, |
| 3306 | * then map them to a single descriptor in the Tx |
| 3307 | * Desc ring. However, if they're larger, as is |
| 3308 | * possible with support for jumbo packets, then |
| 3309 | * split them each across 2 descriptors. |
| 3310 | * |
| 3311 | * This will work until we determine why the hardware |
| 3312 | * doesn't seem to like large fragments. |
| 3313 | */ |
| 3314 | if ((skb->len - skb->data_len) <= 1514) { |
| 3315 | desc[frag].addr_hi = 0; |
| 3316 | /* Low 16 bits are length, high 16 are VLAN and |
| 3317 | currently unused, so zero */ |
| 3318 | desc[frag].len_vlan = |
| 3319 | skb->len - skb->data_len; |
| 3320 | |
| 3321 | /* NOTE: Here, the dma_addr_t returned from |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3322 | * dma_map_single() is implicitly cast as a |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3323 | * u32. Although dma_addr_t can be |
| 3324 | * 64-bit, the address returned by |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3325 | * dma_map_single() is always 32-bit |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3326 | * addressable (as defined by the pci/dma |
| 3327 | * subsystem) |
| 3328 | */ |
| 3329 | desc[frag++].addr_lo = |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3330 | dma_map_single(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3331 | skb->data, |
| 3332 | skb->len - |
| 3333 | skb->data_len, |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3334 | DMA_TO_DEVICE); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3335 | } else { |
| 3336 | desc[frag].addr_hi = 0; |
| 3337 | desc[frag].len_vlan = |
| 3338 | (skb->len - skb->data_len) / 2; |
| 3339 | |
| 3340 | /* NOTE: Here, the dma_addr_t returned from |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3341 | * dma_map_single() is implicitly cast as a |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3342 | * u32. Although dma_addr_t can be |
| 3343 | * 64-bit, the address returned by |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3344 | * dma_map_single() is always 32-bit |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3345 | * addressable (as defined by the pci/dma |
| 3346 | * subsystem) |
| 3347 | */ |
| 3348 | desc[frag++].addr_lo = |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3349 | dma_map_single(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3350 | skb->data, |
| 3351 | ((skb->len - |
| 3352 | skb->data_len) / 2), |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3353 | DMA_TO_DEVICE); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3354 | desc[frag].addr_hi = 0; |
| 3355 | |
| 3356 | desc[frag].len_vlan = |
| 3357 | (skb->len - skb->data_len) / 2; |
| 3358 | |
| 3359 | /* NOTE: Here, the dma_addr_t returned from |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3360 | * dma_map_single() is implicitly cast as a |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3361 | * u32. Although dma_addr_t can be |
| 3362 | * 64-bit, the address returned by |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3363 | * dma_map_single() is always 32-bit |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3364 | * addressable (as defined by the pci/dma |
| 3365 | * subsystem) |
| 3366 | */ |
| 3367 | desc[frag++].addr_lo = |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3368 | dma_map_single(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3369 | skb->data + |
| 3370 | ((skb->len - |
| 3371 | skb->data_len) / 2), |
| 3372 | ((skb->len - |
| 3373 | skb->data_len) / 2), |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3374 | DMA_TO_DEVICE); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3375 | } |
| 3376 | } else { |
| 3377 | desc[frag].addr_hi = 0; |
| 3378 | desc[frag].len_vlan = |
| 3379 | frags[i - 1].size; |
| 3380 | |
| 3381 | /* NOTE: Here, the dma_addr_t returned from |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3382 | * dma_map_page() is implicitly cast as a u32. |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3383 | * Although dma_addr_t can be 64-bit, the address |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3384 | * returned by dma_map_page() is always 32-bit |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3385 | * addressable (as defined by the pci/dma subsystem) |
| 3386 | */ |
Linus Torvalds | aa77677 | 2011-10-26 15:39:02 +0200 | [diff] [blame] | 3387 | desc[frag++].addr_lo = skb_frag_dma_map( |
| 3388 | &adapter->pdev->dev, |
| 3389 | &frags[i - 1], |
| 3390 | 0, |
| 3391 | frags[i - 1].size, |
| 3392 | DMA_TO_DEVICE); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3393 | } |
| 3394 | } |
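/* Descriptor flag bits, as used below: bit 0 marks the last descriptor of a
 * packet, bit 1 the first, and bit 2 requests an interrupt on completion
 * (so 0x5 = last + interrupt).
 */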
| 3395 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3396 | if (phydev && phydev->speed == SPEED_1000) { |
| 3397 | if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) { |
| 3398 | /* Last element & Interrupt flag */ |
| 3399 | desc[frag - 1].flags = 0x5; |
| 3400 | adapter->tx_ring.since_irq = 0; |
| 3401 | } else { /* Last element */ |
| 3402 | desc[frag - 1].flags = 0x1; |
| 3403 | } |
| 3404 | } else |
| 3405 | desc[frag - 1].flags = 0x5; |
| 3406 | |
| 3407 | desc[0].flags |= 2; /* First element flag */ |
| 3408 | |
| 3409 | tcb->index_start = adapter->tx_ring.send_idx; |
| 3410 | tcb->stale = 0; |
| 3411 | |
| 3412 | spin_lock_irqsave(&adapter->send_hw_lock, flags); |
| 3413 | |
| 3414 | thiscopy = NUM_DESC_PER_RING_TX - |
| 3415 | INDEX10(adapter->tx_ring.send_idx); |
| 3416 | |
| 3417 | if (thiscopy >= frag) { |
| 3418 | remainder = 0; |
| 3419 | thiscopy = frag; |
| 3420 | } else { |
| 3421 | remainder = frag - thiscopy; |
| 3422 | } |
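/* send_idx is a 10-bit ring index with a wrap bit (ET_DMA10_WRAP). The copy
 * into the descriptor ring may run past the end of the ring, so it is done
 * in two parts: 'thiscopy' descriptors at the current index, and any
 * 'remainder' continuing from slot 0.
 */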
| 3423 | |
| 3424 | memcpy(adapter->tx_ring.tx_desc_ring + |
| 3425 | INDEX10(adapter->tx_ring.send_idx), desc, |
| 3426 | sizeof(struct tx_desc) * thiscopy); |
| 3427 | |
| 3428 | add_10bit(&adapter->tx_ring.send_idx, thiscopy); |
| 3429 | |
| 3430 | if (INDEX10(adapter->tx_ring.send_idx) == 0 || |
| 3431 | INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) { |
| 3432 | adapter->tx_ring.send_idx &= ~ET_DMA10_MASK; |
| 3433 | adapter->tx_ring.send_idx ^= ET_DMA10_WRAP; |
| 3434 | } |
| 3435 | |
| 3436 | if (remainder) { |
| 3437 | memcpy(adapter->tx_ring.tx_desc_ring, |
| 3438 | desc + thiscopy, |
| 3439 | sizeof(struct tx_desc) * remainder); |
| 3440 | |
| 3441 | add_10bit(&adapter->tx_ring.send_idx, remainder); |
| 3442 | } |
| 3443 | |
| 3444 | if (INDEX10(adapter->tx_ring.send_idx) == 0) { |
| 3445 | if (adapter->tx_ring.send_idx) |
| 3446 | tcb->index = NUM_DESC_PER_RING_TX - 1; |
| 3447 | else |
| 3448 | tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); |
| 3449 | } else |
| 3450 | tcb->index = adapter->tx_ring.send_idx - 1; |
| 3451 | |
| 3452 | spin_lock(&adapter->tcb_send_qlock); |
| 3453 | |
| 3454 | if (adapter->tx_ring.send_tail) |
| 3455 | adapter->tx_ring.send_tail->next = tcb; |
| 3456 | else |
| 3457 | adapter->tx_ring.send_head = tcb; |
| 3458 | |
| 3459 | adapter->tx_ring.send_tail = tcb; |
| 3460 | |
| 3461 | WARN_ON(tcb->next != NULL); |
| 3462 | |
| 3463 | adapter->tx_ring.used++; |
| 3464 | |
| 3465 | spin_unlock(&adapter->tcb_send_qlock); |
| 3466 | |
| 3467 | /* Write the new write pointer back to the device. */ |
| 3468 | writel(adapter->tx_ring.send_idx, |
| 3469 | &adapter->regs->txdma.service_request); |
| 3470 | |
| 3471 | /* For Gig only, we use Tx Interrupt coalescing. Enable the software |
| 3472 | * timer to wake us up if this packet isn't followed by N more. |
| 3473 | */ |
| 3474 | if (phydev && phydev->speed == SPEED_1000) { |
| 3475 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, |
| 3476 | &adapter->regs->global.watchdog_timer); |
| 3477 | } |
| 3478 | spin_unlock_irqrestore(&adapter->send_hw_lock, flags); |
| 3479 | |
| 3480 | return 0; |
| 3481 | } |
| 3482 | |
| 3483 | /** |
| 3484 | * send_packet - Do the work to send a packet |
| 3485 | * @skb: the packet(s) to send |
| 3486 | * @adapter: a pointer to the device's private adapter structure |
| 3487 | * |
| 3488 | * Return 0 in almost all cases; non-zero value in extreme hard failure only. |
| 3489 | * |
| 3490 | * Assumption: Send spinlock has been acquired |
| 3491 | */ |
| 3492 | static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) |
| 3493 | { |
| 3494 | int status; |
| 3495 | struct tcb *tcb = NULL; |
| 3496 | u16 *shbufva; |
| 3497 | unsigned long flags; |
| 3498 | |
| 3499 | /* All packets must have at least a MAC address and a protocol type */ |
| 3500 | if (skb->len < ETH_HLEN) |
| 3501 | return -EIO; |
| 3502 | |
| 3503 | /* Get a TCB for this packet */ |
| 3504 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
| 3505 | |
| 3506 | tcb = adapter->tx_ring.tcb_qhead; |
| 3507 | |
| 3508 | if (tcb == NULL) { |
| 3509 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 3510 | return -ENOMEM; |
| 3511 | } |
| 3512 | |
| 3513 | adapter->tx_ring.tcb_qhead = tcb->next; |
| 3514 | |
| 3515 | if (adapter->tx_ring.tcb_qhead == NULL) |
| 3516 | adapter->tx_ring.tcb_qtail = NULL; |
| 3517 | |
| 3518 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 3519 | |
| 3520 | tcb->skb = skb; |
| 3521 | |
| 3522 | if (skb->data != NULL && skb->len - skb->data_len >= 6) { |
| 3523 | shbufva = (u16 *) skb->data; |
| 3524 | |
| 3525 | if ((shbufva[0] == 0xffff) && |
| 3526 | (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) { |
| 3527 | tcb->flags |= fMP_DEST_BROAD; |
| 3528 | } else if ((shbufva[0] & 0x3) == 0x0001) { |
| 3529 | tcb->flags |= fMP_DEST_MULTI; |
| 3530 | } |
| 3531 | } |
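/* The checks above classify the destination MAC address from the first six
 * bytes of the frame: all-ones means broadcast, and a set group bit in the
 * first octet means multicast, so the matching statistics counter can be
 * bumped when the send completes in free_send_packet().
 */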
| 3532 | |
| 3533 | tcb->next = NULL; |
| 3534 | |
| 3535 | /* Call the NIC specific send handler. */ |
| 3536 | status = nic_send_packet(adapter, tcb); |
| 3537 | |
| 3538 | if (status != 0) { |
| 3539 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
| 3540 | |
| 3541 | if (adapter->tx_ring.tcb_qtail) |
| 3542 | adapter->tx_ring.tcb_qtail->next = tcb; |
| 3543 | else |
| 3544 | /* Apparently ready Q is empty. */ |
| 3545 | adapter->tx_ring.tcb_qhead = tcb; |
| 3546 | |
| 3547 | adapter->tx_ring.tcb_qtail = tcb; |
| 3548 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 3549 | return status; |
| 3550 | } |
| 3551 | WARN_ON(adapter->tx_ring.used > NUM_TCB); |
| 3552 | return 0; |
| 3553 | } |
| 3554 | |
| 3555 | /** |
| 3556 | * et131x_send_packets - This function is called by the OS to send packets |
| 3557 | * @skb: the packet(s) to send |
| 3558 | * @netdev: device on which to TX the above packet(s) |
| 3559 | * |
| 3560 | * Return 0 in almost all cases; non-zero value in extreme hard failure only |
| 3561 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3562 | static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3563 | { |
| 3564 | int status = 0; |
Mark Einon | 06709e9 | 2011-10-20 01:18:46 +0100 | [diff] [blame] | 3565 | struct et131x_adapter *adapter = netdev_priv(netdev); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3566 | |
| 3567 | /* Send these packets |
| 3568 | * |
| 3569 | * NOTE: The Linux Tx entry point is only given one packet at a time |
| 3570 | * to Tx, so the PacketCount and its array are not used here |
| 3571 | */ |
| 3572 | |
| 3573 | /* TCB is not available */ |
| 3574 | if (adapter->tx_ring.used >= NUM_TCB) { |
| 3575 | /* NOTE: If there's an error on send, no need to queue the |
| 3576 | * packet under Linux; if we just send an error up to the |
| 3577 | * netif layer, it will resend the skb to us. |
| 3578 | */ |
| 3579 | status = -ENOMEM; |
| 3580 | } else { |
| 3581 | /* We need to see if the link is up; if it's not, make the |
| 3582 | * netif layer think we're good and drop the packet |
| 3583 | */ |
| 3584 | if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) || |
| 3585 | !netif_carrier_ok(netdev)) { |
| 3586 | dev_kfree_skb_any(skb); |
| 3587 | skb = NULL; |
| 3588 | |
| 3589 | adapter->net_stats.tx_dropped++; |
| 3590 | } else { |
| 3591 | status = send_packet(skb, adapter); |
| 3592 | if (status != 0 && status != -ENOMEM) { |
| 3593 | /* On any other error, make netif think we're |
| 3594 | * OK and drop the packet |
| 3595 | */ |
| 3596 | dev_kfree_skb_any(skb); |
| 3597 | skb = NULL; |
| 3598 | adapter->net_stats.tx_dropped++; |
| 3599 | } |
| 3600 | } |
| 3601 | } |
| 3602 | return status; |
| 3603 | } |
| 3604 | |
| 3605 | /** |
| 3606 | * free_send_packet - Recycle a struct tcb |
| 3607 | * @adapter: pointer to our adapter |
| 3608 | * @tcb: pointer to struct tcb |
| 3609 | * |
| 3610 | * Complete the packet if necessary |
| 3611 | * Assumption - Send spinlock has been acquired |
| 3612 | */ |
| 3613 | static inline void free_send_packet(struct et131x_adapter *adapter, |
| 3614 | struct tcb *tcb) |
| 3615 | { |
| 3616 | unsigned long flags; |
| 3617 | struct tx_desc *desc = NULL; |
| 3618 | struct net_device_stats *stats = &adapter->net_stats; |
| 3619 | |
| 3620 | if (tcb->flags & fMP_DEST_BROAD) |
| 3621 | atomic_inc(&adapter->stats.broadcast_pkts_xmtd); |
| 3622 | else if (tcb->flags & fMP_DEST_MULTI) |
| 3623 | atomic_inc(&adapter->stats.multicast_pkts_xmtd); |
| 3624 | else |
| 3625 | atomic_inc(&adapter->stats.unicast_pkts_xmtd); |
| 3626 | |
| 3627 | if (tcb->skb) { |
| 3628 | stats->tx_bytes += tcb->skb->len; |
| 3629 | |
| 3630 | /* Iterate through the TX descriptors on the ring |
| 3631 | * corresponding to this packet and unmap the fragments |
| 3632 | * they point to |
| 3633 | */ |
| 3634 | do { |
| 3635 | desc = (struct tx_desc *) |
| 3636 | (adapter->tx_ring.tx_desc_ring + |
| 3637 | INDEX10(tcb->index_start)); |
| 3638 | |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3639 | dma_unmap_single(&adapter->pdev->dev, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3640 | desc->addr_lo, |
Mark Einon | 26dc751 | 2011-10-20 01:18:47 +0100 | [diff] [blame] | 3641 | desc->len_vlan, DMA_TO_DEVICE); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3642 | |
| 3643 | add_10bit(&tcb->index_start, 1); |
| 3644 | if (INDEX10(tcb->index_start) >= |
| 3645 | NUM_DESC_PER_RING_TX) { |
| 3646 | tcb->index_start &= ~ET_DMA10_MASK; |
| 3647 | tcb->index_start ^= ET_DMA10_WRAP; |
| 3648 | } |
| 3649 | } while (desc != (adapter->tx_ring.tx_desc_ring + |
| 3650 | INDEX10(tcb->index))); |
| 3651 | |
| 3652 | dev_kfree_skb_any(tcb->skb); |
| 3653 | } |
| 3654 | |
| 3655 | memset(tcb, 0, sizeof(struct tcb)); |
| 3656 | |
| 3657 | /* Add the TCB to the Ready Q */ |
| 3658 | spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); |
| 3659 | |
| 3660 | adapter->net_stats.tx_packets++; |
| 3661 | |
| 3662 | if (adapter->tx_ring.tcb_qtail) |
| 3663 | adapter->tx_ring.tcb_qtail->next = tcb; |
| 3664 | else |
| 3665 | /* Apparently ready Q is empty. */ |
| 3666 | adapter->tx_ring.tcb_qhead = tcb; |
| 3667 | |
| 3668 | adapter->tx_ring.tcb_qtail = tcb; |
| 3669 | |
| 3670 | spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); |
| 3671 | WARN_ON(adapter->tx_ring.used < 0); |
| 3672 | } |
| 3673 | |
| 3674 | /** |
| 3675 | * et131x_free_busy_send_packets - Free and complete the stopped active sends |
| 3676 | * @adapter: pointer to our adapter |
| 3677 | * |
| 3678 | * Assumption - Send spinlock has been acquired |
| 3679 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3680 | static void et131x_free_busy_send_packets(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3681 | { |
| 3682 | struct tcb *tcb; |
| 3683 | unsigned long flags; |
| 3684 | u32 freed = 0; |
| 3685 | |
| 3686 | /* Any packets being sent? Check the first TCB on the send list */ |
| 3687 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 3688 | |
| 3689 | tcb = adapter->tx_ring.send_head; |
| 3690 | |
| 3691 | while (tcb != NULL && freed < NUM_TCB) { |
| 3692 | struct tcb *next = tcb->next; |
| 3693 | |
| 3694 | adapter->tx_ring.send_head = next; |
| 3695 | |
| 3696 | if (next == NULL) |
| 3697 | adapter->tx_ring.send_tail = NULL; |
| 3698 | |
| 3699 | adapter->tx_ring.used--; |
| 3700 | |
| 3701 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 3702 | |
| 3703 | freed++; |
| 3704 | free_send_packet(adapter, tcb); |
| 3705 | |
| 3706 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 3707 | |
| 3708 | tcb = adapter->tx_ring.send_head; |
| 3709 | } |
| 3710 | |
| 3711 | WARN_ON(freed == NUM_TCB); |
| 3712 | |
| 3713 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 3714 | |
| 3715 | adapter->tx_ring.used = 0; |
| 3716 | } |
| 3717 | |
| 3718 | /** |
| 3719 | * et131x_handle_send_interrupt - Interrupt handler for sending processing |
| 3720 | * @adapter: pointer to our adapter |
| 3721 | * |
| 3722 | * Re-claim the send resources, complete sends and get more to send from |
| 3723 | * the send wait queue. |
| 3724 | * |
| 3725 | * Assumption - Send spinlock has been acquired |
| 3726 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3727 | static void et131x_handle_send_interrupt(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3728 | { |
| 3729 | unsigned long flags; |
| 3730 | u32 serviced; |
| 3731 | struct tcb *tcb; |
| 3732 | u32 index; |
| 3733 | |
| 3734 | serviced = readl(&adapter->regs->txdma.new_service_complete); |
| 3735 | index = INDEX10(serviced); |
| 3736 | |
| 3737 | /* Has the ring wrapped? Process any descriptors that do not have |
| 3738 | * the same "wrap" indicator as the current completion indicator |
| 3739 | */ |
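/* tcb->index carries the same wrap bit as the serviced index read above, so
 * two passes are made: the first reaps TCBs whose wrap bit differs (sent
 * before the ring last wrapped), the second reaps those completed within
 * the current wrap.
 */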
| 3740 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 3741 | |
| 3742 | tcb = adapter->tx_ring.send_head; |
| 3743 | |
| 3744 | while (tcb && |
| 3745 | ((serviced ^ tcb->index) & ET_DMA10_WRAP) && |
| 3746 | index < INDEX10(tcb->index)) { |
| 3747 | adapter->tx_ring.used--; |
| 3748 | adapter->tx_ring.send_head = tcb->next; |
| 3749 | if (tcb->next == NULL) |
| 3750 | adapter->tx_ring.send_tail = NULL; |
| 3751 | |
| 3752 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 3753 | free_send_packet(adapter, tcb); |
| 3754 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 3755 | |
| 3756 | /* Goto the next packet */ |
| 3757 | tcb = adapter->tx_ring.send_head; |
| 3758 | } |
| 3759 | while (tcb && |
| 3760 | !((serviced ^ tcb->index) & ET_DMA10_WRAP) |
| 3761 | && index > (tcb->index & ET_DMA10_MASK)) { |
| 3762 | adapter->tx_ring.used--; |
| 3763 | adapter->tx_ring.send_head = tcb->next; |
| 3764 | if (tcb->next == NULL) |
| 3765 | adapter->tx_ring.send_tail = NULL; |
| 3766 | |
| 3767 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 3768 | free_send_packet(adapter, tcb); |
| 3769 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 3770 | |
| 3771 | 		/* Go to the next packet */
| 3772 | tcb = adapter->tx_ring.send_head; |
| 3773 | } |
| 3774 | |
| 3775 | /* Wake up the queue when we hit a low-water mark */ |
| 3776 | if (adapter->tx_ring.used <= NUM_TCB / 3) |
| 3777 | netif_wake_queue(adapter->netdev); |
| 3778 | |
| 3779 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 3780 | } |
| 3781 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3782 | static int et131x_get_settings(struct net_device *netdev, |
| 3783 | struct ethtool_cmd *cmd) |
| 3784 | { |
| 3785 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 3786 | |
| 3787 | return phy_ethtool_gset(adapter->phydev, cmd); |
| 3788 | } |
| 3789 | |
| 3790 | static int et131x_set_settings(struct net_device *netdev, |
| 3791 | struct ethtool_cmd *cmd) |
| 3792 | { |
| 3793 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 3794 | |
| 3795 | return phy_ethtool_sset(adapter->phydev, cmd); |
| 3796 | } |
| 3797 | |
| 3798 | static int et131x_get_regs_len(struct net_device *netdev) |
| 3799 | { |
| 3800 | #define ET131X_REGS_LEN 256 |
| 3801 | return ET131X_REGS_LEN * sizeof(u32); |
| 3802 | } |
| 3803 | |
| 3804 | static void et131x_get_regs(struct net_device *netdev, |
| 3805 | struct ethtool_regs *regs, void *regs_data) |
| 3806 | { |
| 3807 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 3808 | struct address_map __iomem *aregs = adapter->regs; |
| 3809 | u32 *regs_buff = regs_data; |
| 3810 | u32 num = 0; |
| 3811 | |
| 3812 | memset(regs_data, 0, et131x_get_regs_len(netdev)); |
| 3813 | |
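| |	/* regs->version packs: top byte set to 1, next byte the PCI
| |	 * revision, low 16 bits the PCI device id.
| |	 */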
| 3814 | regs->version = (1 << 24) | (adapter->pdev->revision << 16) | |
| 3815 | adapter->pdev->device; |
| 3816 | |
| 3817 | /* PHY regs */ |
| 3818 | et131x_mii_read(adapter, MII_BMCR, (u16 *)®s_buff[num++]); |
| 3819 | et131x_mii_read(adapter, MII_BMSR, (u16 *)®s_buff[num++]); |
| 3820 | et131x_mii_read(adapter, MII_PHYSID1, (u16 *)®s_buff[num++]); |
| 3821 | et131x_mii_read(adapter, MII_PHYSID2, (u16 *)®s_buff[num++]); |
| 3822 | et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)®s_buff[num++]); |
| 3823 | et131x_mii_read(adapter, MII_LPA, (u16 *)®s_buff[num++]); |
| 3824 | et131x_mii_read(adapter, MII_EXPANSION, (u16 *)®s_buff[num++]); |
| 3825 | /* Autoneg next page transmit reg */ |
| 3826 | et131x_mii_read(adapter, 0x07, (u16 *)®s_buff[num++]); |
| 3827 | /* Link partner next page reg */ |
| 3828 | et131x_mii_read(adapter, 0x08, (u16 *)®s_buff[num++]); |
| 3829 | et131x_mii_read(adapter, MII_CTRL1000, (u16 *)®s_buff[num++]); |
| 3830 | et131x_mii_read(adapter, MII_STAT1000, (u16 *)®s_buff[num++]); |
| 3831 | et131x_mii_read(adapter, MII_ESTATUS, (u16 *)®s_buff[num++]); |
| 3832 | et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)®s_buff[num++]); |
| 3833 | et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)®s_buff[num++]); |
| 3834 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, |
| 3835 | (u16 *)®s_buff[num++]); |
| 3836 | et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, |
| 3837 | (u16 *)®s_buff[num++]); |
| 3838 | et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1, |
| 3839 | (u16 *)®s_buff[num++]); |
| 3840 | et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, |
| 3841 | (u16 *)®s_buff[num++]); |
| 3842 | et131x_mii_read(adapter, PHY_CONFIG, (u16 *)®s_buff[num++]); |
| 3843 | et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)®s_buff[num++]); |
| 3844 | et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)®s_buff[num++]); |
| 3845 | et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, |
| 3846 | (u16 *)®s_buff[num++]); |
| 3847 | et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)®s_buff[num++]); |
| 3848 | et131x_mii_read(adapter, PHY_LED_1, (u16 *)®s_buff[num++]); |
| 3849 | et131x_mii_read(adapter, PHY_LED_2, (u16 *)®s_buff[num++]); |
| 3850 | |
| 3851 | /* Global regs */ |
| 3852 | regs_buff[num++] = readl(&aregs->global.txq_start_addr); |
| 3853 | regs_buff[num++] = readl(&aregs->global.txq_end_addr); |
| 3854 | regs_buff[num++] = readl(&aregs->global.rxq_start_addr); |
| 3855 | regs_buff[num++] = readl(&aregs->global.rxq_end_addr); |
| 3856 | regs_buff[num++] = readl(&aregs->global.pm_csr); |
| 3857 | regs_buff[num++] = adapter->stats.interrupt_status; |
| 3858 | regs_buff[num++] = readl(&aregs->global.int_mask); |
| 3859 | regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); |
| 3860 | regs_buff[num++] = readl(&aregs->global.int_status_alias); |
| 3861 | regs_buff[num++] = readl(&aregs->global.sw_reset); |
| 3862 | regs_buff[num++] = readl(&aregs->global.slv_timer); |
| 3863 | regs_buff[num++] = readl(&aregs->global.msi_config); |
| 3864 | regs_buff[num++] = readl(&aregs->global.loopback); |
| 3865 | regs_buff[num++] = readl(&aregs->global.watchdog_timer); |
| 3866 | |
| 3867 | /* TXDMA regs */ |
| 3868 | regs_buff[num++] = readl(&aregs->txdma.csr); |
| 3869 | regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); |
| 3870 | regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); |
| 3871 | regs_buff[num++] = readl(&aregs->txdma.pr_num_des); |
| 3872 | regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); |
| 3873 | regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); |
| 3874 | regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); |
| 3875 | regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); |
| 3876 | regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); |
| 3877 | regs_buff[num++] = readl(&aregs->txdma.service_request); |
| 3878 | regs_buff[num++] = readl(&aregs->txdma.service_complete); |
| 3879 | regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); |
| 3880 | regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); |
| 3881 | regs_buff[num++] = readl(&aregs->txdma.tx_dma_error); |
| 3882 | regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); |
| 3883 | regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt); |
| 3884 | regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); |
| 3885 | regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); |
| 3886 | regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); |
| 3887 | regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); |
| 3888 | regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); |
| 3889 | regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); |
| 3890 | regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); |
| 3891 | regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); |
| 3892 | regs_buff[num++] = readl(&aregs->txdma.new_service_complete); |
| 3893 | regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); |
| 3894 | |
| 3895 | /* RXDMA regs */ |
| 3896 | regs_buff[num++] = readl(&aregs->rxdma.csr); |
| 3897 | regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); |
| 3898 | regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); |
| 3899 | regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); |
| 3900 | regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); |
| 3901 | regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); |
| 3902 | regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); |
| 3903 | regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); |
| 3904 | regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); |
| 3905 | regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); |
| 3906 | regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); |
| 3907 | regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); |
| 3908 | regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); |
| 3909 | regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); |
| 3910 | regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); |
| 3911 | regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); |
| 3912 | regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); |
| 3913 | regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); |
| 3914 | regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); |
| 3915 | regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); |
| 3916 | regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); |
| 3917 | regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); |
| 3918 | regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); |
| 3919 | regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); |
| 3920 | regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); |
| 3921 | regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); |
| 3922 | regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); |
| 3923 | regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); |
| 3924 | regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); |
| 3925 | } |
| 3926 | |
| 3927 | #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */ |
| 3928 | static void et131x_get_drvinfo(struct net_device *netdev, |
| 3929 | struct ethtool_drvinfo *info) |
| 3930 | { |
| 3931 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 3932 | |
| 3933 | strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN); |
| 3934 | strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN); |
| 3935 | strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN); |
| 3936 | } |
| 3937 | |
| 3938 | static struct ethtool_ops et131x_ethtool_ops = { |
| 3939 | .get_settings = et131x_get_settings, |
| 3940 | .set_settings = et131x_set_settings, |
| 3941 | .get_drvinfo = et131x_get_drvinfo, |
| 3942 | .get_regs_len = et131x_get_regs_len, |
| 3943 | .get_regs = et131x_get_regs, |
| 3944 | .get_link = ethtool_op_get_link, |
| 3945 | }; |
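| |/* These ops back the standard ethtool interface: for example (assuming the
| | * interface is named eth0) "ethtool eth0", "ethtool -i eth0" and
| | * "ethtool -d eth0" exercise the settings, drvinfo and register-dump hooks
| | * above.
| | */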
| 3946 | |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3947 | static void et131x_set_ethtool_ops(struct net_device *netdev) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3948 | { |
| 3949 | SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops); |
| 3950 | } |
| 3951 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3952 | /** |
| 3953 | * et131x_hwaddr_init - set up the MAC Address on the ET1310 |
| 3954 | * @adapter: pointer to our private adapter structure |
| 3955 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 3956 | static void et131x_hwaddr_init(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 3957 | { |
| 3958 | 	/* If we have our default MAC from init and no MAC address from
| 3959 | 	 * EEPROM, then we need to generate the last octet and set it on
| 3960 | 	 * the device
| 3961 | 	 */
| 3962 | if (adapter->rom_addr[0] == 0x00 && |
| 3963 | adapter->rom_addr[1] == 0x00 && |
| 3964 | adapter->rom_addr[2] == 0x00 && |
| 3965 | adapter->rom_addr[3] == 0x00 && |
| 3966 | adapter->rom_addr[4] == 0x00 && |
| 3967 | adapter->rom_addr[5] == 0x00) { |
| 3968 | 		/*
| 3969 | 		 * We need to randomly generate the last octet so we
| 3970 | 		 * decrease our chances of setting the MAC address to
| 3971 | 		 * the same as another one of our cards in the system
| 3972 | 		 */
| 3973 | get_random_bytes(&adapter->addr[5], 1); |
| 3974 | /* |
| 3975 | * We have the default value in the register we are |
| 3976 | * working with so we need to copy the current |
| 3977 | * address into the permanent address |
| 3978 | */ |
| 3979 | memcpy(adapter->rom_addr, |
| 3980 | adapter->addr, ETH_ALEN); |
| 3981 | } else { |
| 3982 | /* We do not have an override address, so set the |
| 3983 | * current address to the permanent address and add |
| 3984 | * it to the device |
| 3985 | */ |
| 3986 | memcpy(adapter->addr, |
| 3987 | adapter->rom_addr, ETH_ALEN); |
| 3988 | } |
| 3989 | } |
| 3990 | |
| 3991 | /** |
| 3992 | * et131x_pci_init - initial PCI setup |
| 3993 | * @adapter: pointer to our private adapter structure |
| 3994 | * @pdev: our PCI device |
| 3995 | * |
| 3996 | * Perform the initial setup of PCI registers and if possible initialise |
| 3997 | * the MAC address. At this point the I/O registers have yet to be mapped |
| 3998 | */ |
| 3999 | static int et131x_pci_init(struct et131x_adapter *adapter, |
| 4000 | struct pci_dev *pdev) |
| 4001 | { |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4002 | int cap = pci_pcie_cap(pdev); |
| 4003 | u16 max_payload; |
| 4004 | u16 ctl; |
| 4005 | int i, rc; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4006 | |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4007 | rc = et131x_init_eeprom(adapter); |
| 4008 | if (rc < 0) |
| 4009 | goto out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4010 | |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4011 | if (!cap) { |
| 4012 | dev_err(&pdev->dev, "Missing PCIe capabilities\n"); |
| 4013 | goto err_out; |
| 4014 | } |
joseph daniel | bf3313a | 2012-05-01 00:30:34 +0600 | [diff] [blame] | 4015 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4016 | /* Let's set up the PORT LOGIC Register. First we need to know what |
| 4017 | * the max_payload_size is |
| 4018 | */ |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4019 | if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4020 | dev_err(&pdev->dev, |
| 4021 | "Could not read PCI config space for Max Payload Size\n"); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4022 | goto err_out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4023 | } |
| 4024 | |
| 4025 | /* Program the Ack/Nak latency and replay timers */ |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4026 | max_payload &= 0x07; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4027 | |
| 4028 | if (max_payload < 2) { |
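| |		/* The table index is the DEVCAP max-payload encoding:
| |		 * 0 = 128 bytes, 1 = 256 bytes. Larger payload sizes leave
| |		 * the hardware default timer values in place.
| |		 */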
| 4029 | static const u16 acknak[2] = { 0x76, 0xD0 }; |
| 4030 | static const u16 replay[2] = { 0x1E0, 0x2ED }; |
| 4031 | |
| 4032 | if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, |
| 4033 | acknak[max_payload])) { |
| 4034 | dev_err(&pdev->dev, |
| 4035 | "Could not write PCI config space for ACK/NAK\n"); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4036 | goto err_out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4037 | } |
| 4038 | if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, |
| 4039 | replay[max_payload])) { |
| 4040 | dev_err(&pdev->dev, |
| 4041 | "Could not write PCI config space for Replay Timer\n"); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4042 | goto err_out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4043 | } |
| 4044 | } |
| 4045 | |
| 4046 | 	/* l0s and l1 latency timers. We are using default values,
| 4047 | 	 * representing 001 for L0s and 010 for L1
| 4048 | 	 */
| 4049 | if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { |
| 4050 | dev_err(&pdev->dev, |
| 4051 | "Could not write PCI config space for Latency Timers\n"); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4052 | goto err_out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4053 | } |
| 4054 | |
| 4055 | /* Change the max read size to 2k */ |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4056 | if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4057 | dev_err(&pdev->dev, |
| 4058 | "Could not read PCI config space for Max read size\n"); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4059 | goto err_out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4060 | } |
| 4061 | |
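| |	/* PCI_EXP_DEVCTL_READRQ occupies bits 14:12 of the device control
| |	 * register; an encoding of 4 means 128 << 4 = 2048 byte maximum
| |	 * read request size.
| |	 */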
joseph daniel | bf3313a | 2012-05-01 00:30:34 +0600 | [diff] [blame] | 4062 | ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4063 | |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4064 | if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4065 | dev_err(&pdev->dev, |
| 4066 | "Could not write PCI config space for Max read size\n"); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4067 | goto err_out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4068 | } |
| 4069 | |
| 4070 | /* Get MAC address from config space if an eeprom exists, otherwise |
| 4071 | * the MAC address there will not be valid |
| 4072 | */ |
| 4073 | if (!adapter->has_eeprom) { |
| 4074 | et131x_hwaddr_init(adapter); |
| 4075 | return 0; |
| 4076 | } |
| 4077 | |
| 4078 | for (i = 0; i < ETH_ALEN; i++) { |
| 4079 | if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, |
| 4080 | adapter->rom_addr + i)) { |
| 4081 | dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4082 | goto err_out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4083 | } |
| 4084 | } |
| 4085 | memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN); |
Francois Romieu | d14e3d0 | 2011-10-23 19:12:14 +0200 | [diff] [blame] | 4086 | out: |
| 4087 | return rc; |
| 4088 | err_out: |
| 4089 | rc = -EIO; |
| 4090 | goto out; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4091 | } |
| 4092 | |
| 4093 | /** |
| 4094 | * et131x_error_timer_handler |
| 4095 | * @data: timer-specific variable; here a pointer to our adapter structure |
| 4096 | * |
| 4097 | * The routine called when the error timer expires, to track the number of |
| 4098 | * recurring errors. |
| 4099 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4100 | static void et131x_error_timer_handler(unsigned long data) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4101 | { |
| 4102 | struct et131x_adapter *adapter = (struct et131x_adapter *) data; |
| 4103 | struct phy_device *phydev = adapter->phydev; |
| 4104 | |
| 4105 | if (et1310_in_phy_coma(adapter)) { |
| 4106 | 		/* Bring the device immediately out of coma to prevent it
| 4107 | 		 * from sleeping indefinitely; this mechanism could be
| 4108 | 		 * improved! */
| 4109 | et1310_disable_phy_coma(adapter); |
| 4110 | adapter->boot_coma = 20; |
| 4111 | } else { |
| 4112 | et1310_update_macstat_host_counters(adapter); |
| 4113 | } |
| 4114 | |
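| |	/* boot_coma appears to count error-timer ticks without link: it is
| |	 * reset to 0 on link loss, and after roughly ten ticks the PHY is
| |	 * put into coma; the value 20 acts as a "link up / already handled"
| |	 * sentinel.
| |	 */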
| 4115 | if (!phydev->link && adapter->boot_coma < 11) |
| 4116 | adapter->boot_coma++; |
| 4117 | |
| 4118 | if (adapter->boot_coma == 10) { |
| 4119 | if (!phydev->link) { |
| 4120 | if (!et1310_in_phy_coma(adapter)) { |
| 4121 | /* NOTE - This was originally a 'sync with |
| 4122 | * interrupt'. How to do that under Linux? |
| 4123 | */ |
| 4124 | et131x_enable_interrupts(adapter); |
| 4125 | et1310_enable_phy_coma(adapter); |
| 4126 | } |
| 4127 | } |
| 4128 | } |
| 4129 | |
| 4130 | /* This is a periodic timer, so reschedule */ |
| 4131 | mod_timer(&adapter->error_timer, jiffies + |
| 4132 | TX_ERROR_PERIOD * HZ / 1000); |
| 4133 | } |
| 4134 | |
| 4135 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4136 | * et131x_adapter_memory_alloc |
| 4137 | * @adapter: pointer to our private adapter structure |
| 4138 | * |
| 4139 | * Returns 0 on success, errno on failure (as defined in errno.h). |
| 4140 | * |
| 4141 | * Allocate all the memory blocks for send, receive and others. |
| 4142 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4143 | static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4144 | { |
| 4145 | int status; |
| 4146 | |
| 4147 | /* Allocate memory for the Tx Ring */ |
| 4148 | status = et131x_tx_dma_memory_alloc(adapter); |
| 4149 | if (status != 0) { |
| 4150 | dev_err(&adapter->pdev->dev, |
| 4151 | "et131x_tx_dma_memory_alloc FAILED\n"); |
| 4152 | return status; |
| 4153 | } |
| 4154 | /* Receive buffer memory allocation */ |
| 4155 | status = et131x_rx_dma_memory_alloc(adapter); |
| 4156 | if (status != 0) { |
| 4157 | dev_err(&adapter->pdev->dev, |
| 4158 | "et131x_rx_dma_memory_alloc FAILED\n"); |
| 4159 | et131x_tx_dma_memory_free(adapter); |
| 4160 | return status; |
| 4161 | } |
| 4162 | |
| 4163 | /* Init receive data structures */ |
| 4164 | status = et131x_init_recv(adapter); |
| 4165 | if (status != 0) { |
| 4166 | dev_err(&adapter->pdev->dev, |
| 4167 | "et131x_init_recv FAILED\n"); |
| 4168 | et131x_tx_dma_memory_free(adapter); |
| 4169 | et131x_rx_dma_memory_free(adapter); |
| 4170 | } |
| 4171 | return status; |
| 4172 | } |
| 4173 | |
| 4174 | /** |
| 4175 | * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx |
| 4176 | * @adapter: pointer to our private adapter structure |
| 4177 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4178 | static void et131x_adapter_memory_free(struct et131x_adapter *adapter) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4179 | { |
| 4180 | /* Free DMA memory */ |
| 4181 | et131x_tx_dma_memory_free(adapter); |
| 4182 | et131x_rx_dma_memory_free(adapter); |
| 4183 | } |
| 4184 | |
| 4185 | static void et131x_adjust_link(struct net_device *netdev) |
| 4186 | { |
| 4187 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4188 | struct phy_device *phydev = adapter->phydev; |
| 4189 | |
| 4190 | if (netif_carrier_ok(netdev)) { |
| 4191 | adapter->boot_coma = 20; |
| 4192 | |
| 4193 | if (phydev && phydev->speed == SPEED_10) { |
| 4194 | /* |
| 4195 | * NOTE - Is there a way to query this without |
| 4196 | * TruePHY? |
| 4197 | * && TRU_QueryCoreType(adapter->hTruePhy, 0)== |
| 4198 | * EMI_TRUEPHY_A13O) { |
| 4199 | */ |
| 4200 | u16 register18; |
| 4201 | |
| 4202 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, |
| 4203 | ®ister18); |
| 4204 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
| 4205 | register18 | 0x4); |
| 4206 | et131x_mii_write(adapter, PHY_INDEX_REG, |
| 4207 | register18 | 0x8402); |
| 4208 | et131x_mii_write(adapter, PHY_DATA_REG, |
| 4209 | register18 | 511); |
| 4210 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
| 4211 | register18); |
| 4212 | } |
| 4213 | |
| 4214 | et1310_config_flow_control(adapter); |
| 4215 | |
| 4216 | if (phydev && phydev->speed == SPEED_1000 && |
| 4217 | adapter->registry_jumbo_packet > 2048) { |
| 4218 | u16 reg; |
| 4219 | |
| 4220 | et131x_mii_read(adapter, PHY_CONFIG, ®); |
| 4221 | reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; |
| 4222 | reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; |
| 4223 | et131x_mii_write(adapter, PHY_CONFIG, reg); |
| 4224 | } |
| 4225 | |
| 4226 | et131x_set_rx_dma_timer(adapter); |
| 4227 | et1310_config_mac_regs2(adapter); |
| 4228 | } |
| 4229 | |
| 4230 | if (phydev && phydev->link != adapter->link) { |
| 4231 | /* |
| 4232 | * Check to see if we are in coma mode and if |
| 4233 | * so, disable it because we will not be able |
| 4234 | * to read PHY values until we are out. |
| 4235 | */ |
| 4236 | if (et1310_in_phy_coma(adapter)) |
| 4237 | et1310_disable_phy_coma(adapter); |
| 4238 | |
| 4239 | if (phydev->link) { |
| 4240 | adapter->boot_coma = 20; |
| 4241 | } else { |
| 4242 | dev_warn(&adapter->pdev->dev, |
| 4243 | "Link down - cable problem ?\n"); |
| 4244 | adapter->boot_coma = 0; |
| 4245 | |
| 4246 | if (phydev->speed == SPEED_10) { |
| 4247 | /* NOTE - Is there a way to query this without |
| 4248 | * TruePHY? |
| 4249 | * && TRU_QueryCoreType(adapter->hTruePhy, 0) == |
| 4250 | * EMI_TRUEPHY_A13O) |
| 4251 | */ |
| 4252 | u16 register18; |
| 4253 | |
| 4254 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, |
| 4255 | ®ister18); |
| 4256 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
| 4257 | register18 | 0x4); |
| 4258 | et131x_mii_write(adapter, PHY_INDEX_REG, |
| 4259 | register18 | 0x8402); |
| 4260 | et131x_mii_write(adapter, PHY_DATA_REG, |
| 4261 | register18 | 511); |
| 4262 | et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
| 4263 | register18); |
| 4264 | } |
| 4265 | |
| 4266 | /* Free the packets being actively sent & stopped */ |
| 4267 | et131x_free_busy_send_packets(adapter); |
| 4268 | |
| 4269 | /* Re-initialize the send structures */ |
| 4270 | et131x_init_send(adapter); |
| 4271 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4272 | /* |
| 4273 | * Bring the device back to the state it was during |
| 4274 | * init prior to autonegotiation being complete. This |
| 4275 | * way, when we get the auto-neg complete interrupt, |
| 4276 | * we can complete init by calling config_mac_regs2. |
| 4277 | */ |
| 4278 | et131x_soft_reset(adapter); |
| 4279 | |
| 4280 | /* Setup ET1310 as per the documentation */ |
| 4281 | et131x_adapter_setup(adapter); |
| 4282 | |
| 4283 | /* perform reset of tx/rx */ |
| 4284 | et131x_disable_txrx(netdev); |
| 4285 | et131x_enable_txrx(netdev); |
| 4286 | } |
| 4287 | |
| 4288 | adapter->link = phydev->link; |
| 4289 | |
| 4290 | phy_print_status(phydev); |
| 4291 | } |
| 4292 | } |
| 4293 | |
| 4294 | static int et131x_mii_probe(struct net_device *netdev) |
| 4295 | { |
| 4296 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4297 | struct phy_device *phydev = NULL; |
| 4298 | |
| 4299 | phydev = phy_find_first(adapter->mii_bus); |
| 4300 | if (!phydev) { |
| 4301 | dev_err(&adapter->pdev->dev, "no PHY found\n"); |
| 4302 | return -ENODEV; |
| 4303 | } |
| 4304 | |
| 4305 | phydev = phy_connect(netdev, dev_name(&phydev->dev), |
| 4306 | &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII); |
| 4307 | |
| 4308 | if (IS_ERR(phydev)) { |
| 4309 | dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); |
| 4310 | return PTR_ERR(phydev); |
| 4311 | } |
| 4312 | |
| 4313 | phydev->supported &= (SUPPORTED_10baseT_Half |
| 4314 | | SUPPORTED_10baseT_Full |
| 4315 | | SUPPORTED_100baseT_Half |
| 4316 | | SUPPORTED_100baseT_Full |
| 4317 | | SUPPORTED_Autoneg |
| 4318 | | SUPPORTED_MII |
| 4319 | | SUPPORTED_TP); |
| 4320 | |
| 4321 | if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) |
| 4322 | phydev->supported |= SUPPORTED_1000baseT_Full; |
| 4323 | |
| 4324 | phydev->advertising = phydev->supported; |
| 4325 | adapter->phydev = phydev; |
| 4326 | |
Adnan Ali | 397d3e6 | 2012-05-25 18:56:40 +0100 | [diff] [blame] | 4327 | dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4328 | phydev->drv->name, dev_name(&phydev->dev)); |
| 4329 | |
| 4330 | return 0; |
| 4331 | } |
| 4332 | |
| 4333 | /** |
| 4334 | * et131x_adapter_init |
| 4335 | * @adapter: pointer to the private adapter struct |
| 4336 | * @pdev: pointer to the PCI device |
| 4337 | * |
| 4338 | * Initialize the data structures for the et131x_adapter object and link |
| 4339 | * them together with the platform provided device structures. |
| 4340 | */ |
| 4341 | static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, |
| 4342 | struct pci_dev *pdev) |
| 4343 | { |
| 4344 | static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; |
| 4345 | |
| 4346 | struct et131x_adapter *adapter; |
| 4347 | |
| 4348 | /* Allocate private adapter struct and copy in relevant information */ |
| 4349 | adapter = netdev_priv(netdev); |
| 4350 | adapter->pdev = pci_dev_get(pdev); |
| 4351 | adapter->netdev = netdev; |
| 4352 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4353 | /* Initialize spinlocks here */ |
| 4354 | spin_lock_init(&adapter->lock); |
| 4355 | spin_lock_init(&adapter->tcb_send_qlock); |
| 4356 | spin_lock_init(&adapter->tcb_ready_qlock); |
| 4357 | spin_lock_init(&adapter->send_hw_lock); |
| 4358 | spin_lock_init(&adapter->rcv_lock); |
| 4359 | spin_lock_init(&adapter->rcv_pend_lock); |
| 4360 | spin_lock_init(&adapter->fbr_lock); |
| 4361 | spin_lock_init(&adapter->phy_lock); |
| 4362 | |
| 4363 | adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ |
| 4364 | |
| 4365 | /* Set the MAC address to a default */ |
| 4366 | memcpy(adapter->addr, default_mac, ETH_ALEN); |
| 4367 | |
| 4368 | return adapter; |
| 4369 | } |
| 4370 | |
| 4371 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4372 | * et131x_pci_remove |
| 4373 | * @pdev: a pointer to the device's pci_dev structure |
| 4374 | * |
| 4375 | * Registered in the pci_driver structure, this function is called when the |
| 4376 | * PCI subsystem detects that a PCI device which matches the information |
| 4377 | * contained in the pci_device_id table has been removed. |
| 4378 | */ |
| 4379 | static void __devexit et131x_pci_remove(struct pci_dev *pdev) |
| 4380 | { |
| 4381 | struct net_device *netdev = pci_get_drvdata(pdev); |
| 4382 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4383 | |
| 4384 | unregister_netdev(netdev); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 4385 | phy_disconnect(adapter->phydev); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4386 | mdiobus_unregister(adapter->mii_bus); |
| 4387 | kfree(adapter->mii_bus->irq); |
| 4388 | mdiobus_free(adapter->mii_bus); |
| 4389 | |
| 4390 | et131x_adapter_memory_free(adapter); |
| 4391 | iounmap(adapter->regs); |
| 4392 | pci_dev_put(pdev); |
| 4393 | |
| 4394 | free_netdev(netdev); |
| 4395 | pci_release_regions(pdev); |
| 4396 | pci_disable_device(pdev); |
| 4397 | } |
| 4398 | |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 4399 | /** |
| 4400 | * et131x_up - Bring up a device for use. |
| 4401 | * @netdev: device to be opened |
| 4402 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4403 | static void et131x_up(struct net_device *netdev) |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 4404 | { |
| 4405 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4406 | |
| 4407 | et131x_enable_txrx(netdev); |
| 4408 | phy_start(adapter->phydev); |
| 4409 | } |
| 4410 | |
| 4411 | /** |
| 4412 | * et131x_down - Bring down the device |
Justin P. Mattock | ac399bc | 2012-02-20 18:23:09 -0800 | [diff] [blame] | 4413 | * @netdev: device to be brought down |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 4414 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4415 | static void et131x_down(struct net_device *netdev) |
Mark Einon | a4d444b | 2011-10-23 10:22:50 +0100 | [diff] [blame] | 4416 | { |
| 4417 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4418 | |
| 4419 | /* Save the timestamp for the TX watchdog, prevent a timeout */ |
| 4420 | netdev->trans_start = jiffies; |
| 4421 | |
| 4422 | phy_stop(adapter->phydev); |
| 4423 | et131x_disable_txrx(netdev); |
| 4424 | } |
| 4425 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4426 | #ifdef CONFIG_PM_SLEEP |
| 4427 | static int et131x_suspend(struct device *dev) |
| 4428 | { |
| 4429 | struct pci_dev *pdev = to_pci_dev(dev); |
| 4430 | struct net_device *netdev = pci_get_drvdata(pdev); |
| 4431 | |
| 4432 | if (netif_running(netdev)) { |
| 4433 | netif_device_detach(netdev); |
| 4434 | et131x_down(netdev); |
| 4435 | pci_save_state(pdev); |
| 4436 | } |
| 4437 | |
| 4438 | return 0; |
| 4439 | } |
| 4440 | |
| 4441 | static int et131x_resume(struct device *dev) |
| 4442 | { |
| 4443 | struct pci_dev *pdev = to_pci_dev(dev); |
| 4444 | struct net_device *netdev = pci_get_drvdata(pdev); |
| 4445 | |
| 4446 | if (netif_running(netdev)) { |
| 4447 | pci_restore_state(pdev); |
| 4448 | et131x_up(netdev); |
| 4449 | netif_device_attach(netdev); |
| 4450 | } |
| 4451 | |
| 4452 | return 0; |
| 4453 | } |
| 4454 | |
Mark Einon | 2e9ff8d | 2011-11-04 17:58:02 +0000 | [diff] [blame] | 4455 | static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); |
| 4456 | #define ET131X_PM_OPS (&et131x_pm_ops) |
| 4457 | #else |
| 4458 | #define ET131X_PM_OPS NULL |
| 4459 | #endif |
| 4460 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4461 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4462 | * et131x_isr - The Interrupt Service Routine for the driver. |
| 4463 | * @irq: the IRQ on which the interrupt was received. |
| 4464 | * @dev_id: device-specific info (here a pointer to a net_device struct) |
| 4465 | * |
| 4466 | * Returns a value indicating if the interrupt was handled. |
| 4467 | */ |
| 4468 | irqreturn_t et131x_isr(int irq, void *dev_id) |
| 4469 | { |
| 4470 | bool handled = true; |
| 4471 | struct net_device *netdev = (struct net_device *)dev_id; |
| 4472 | struct et131x_adapter *adapter = NULL; |
| 4473 | u32 status; |
| 4474 | |
| 4475 | if (!netif_device_present(netdev)) { |
| 4476 | handled = false; |
| 4477 | goto out; |
| 4478 | } |
| 4479 | |
| 4480 | adapter = netdev_priv(netdev); |
| 4481 | |
| 4482 | /* If the adapter is in low power state, then it should not |
| 4483 | * recognize any interrupt |
| 4484 | */ |
| 4485 | |
| 4486 | /* Disable Device Interrupts */ |
| 4487 | et131x_disable_interrupts(adapter); |
| 4488 | |
| 4489 | /* Get a copy of the value in the interrupt status register |
| 4490 | * so we can process the interrupting section |
| 4491 | */ |
| 4492 | status = readl(&adapter->regs->global.int_status); |
| 4493 | |
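| |	/* Mask off the sources we do not service here; when Tx flow control
| |	 * is in use, the free-buffer-ring-low interrupts are presumably left
| |	 * unmasked so that pause frames can be requested (see the handler
| |	 * scheduled below).
| |	 */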
| 4494 | if (adapter->flowcontrol == FLOW_TXONLY || |
| 4495 | adapter->flowcontrol == FLOW_BOTH) { |
| 4496 | status &= ~INT_MASK_ENABLE; |
| 4497 | } else { |
| 4498 | status &= ~INT_MASK_ENABLE_NO_FLOW; |
| 4499 | } |
| 4500 | |
| 4501 | /* Make sure this is our interrupt */ |
| 4502 | if (!status) { |
| 4503 | handled = false; |
| 4504 | et131x_enable_interrupts(adapter); |
| 4505 | goto out; |
| 4506 | } |
| 4507 | |
| 4508 | /* This is our interrupt, so process accordingly */ |
| 4509 | |
| 4510 | if (status & ET_INTR_WATCHDOG) { |
| 4511 | struct tcb *tcb = adapter->tx_ring.send_head; |
| 4512 | |
| 4513 | if (tcb) |
| 4514 | if (++tcb->stale > 1) |
| 4515 | status |= ET_INTR_TXDMA_ISR; |
| 4516 | |
| 4517 | if (adapter->rx_ring.unfinished_receives) |
| 4518 | status |= ET_INTR_RXDMA_XFR_DONE; |
| 4519 | else if (tcb == NULL) |
| 4520 | writel(0, &adapter->regs->global.watchdog_timer); |
| 4521 | |
| 4522 | status &= ~ET_INTR_WATCHDOG; |
| 4523 | } |
| 4524 | |
| 4525 | if (status == 0) { |
| 4526 | /* This interrupt has in some way been "handled" by |
| 4527 | * the ISR. Either it was a spurious Rx interrupt, or |
| 4528 | * it was a Tx interrupt that has been filtered by |
| 4529 | * the ISR. |
| 4530 | */ |
| 4531 | et131x_enable_interrupts(adapter); |
| 4532 | goto out; |
| 4533 | } |
| 4534 | |
| 4535 | /* We need to save the interrupt status value for use in our |
| 4536 | * DPC. We will clear the software copy of that in that |
| 4537 | * routine. |
| 4538 | */ |
| 4539 | adapter->stats.interrupt_status = status; |
| 4540 | |
| 4541 | /* Schedule the ISR handler as a bottom-half task in the |
| 4542 | * kernel's tq_immediate queue, and mark the queue for |
| 4543 | * execution |
| 4544 | */ |
| 4545 | schedule_work(&adapter->task); |
| 4546 | out: |
| 4547 | return IRQ_RETVAL(handled); |
| 4548 | } |
| 4549 | |
| 4550 | /** |
| 4551 | * et131x_isr_handler - The ISR handler |
| 4552 |  * @work: pointer to the work_struct embedded in our adapter structure
| 4553 |  *
| 4554 |  * Scheduled to run in a deferred context by the ISR. This is where the ISR's
| 4555 |  * work actually gets done.
| 4556 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4557 | static void et131x_isr_handler(struct work_struct *work) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4558 | { |
| 4559 | struct et131x_adapter *adapter = |
| 4560 | container_of(work, struct et131x_adapter, task); |
| 4561 | u32 status = adapter->stats.interrupt_status; |
| 4562 | struct address_map __iomem *iomem = adapter->regs; |
| 4563 | |
| 4564 | /* |
| 4565 | * These first two are by far the most common. Once handled, we clear |
| 4566 | * their two bits in the status word. If the word is now zero, we |
| 4567 | * exit. |
| 4568 | */ |
| 4569 | /* Handle all the completed Transmit interrupts */ |
| 4570 | if (status & ET_INTR_TXDMA_ISR) |
| 4571 | et131x_handle_send_interrupt(adapter); |
| 4572 | |
| 4573 | /* Handle all the completed Receives interrupts */ |
| 4574 | if (status & ET_INTR_RXDMA_XFR_DONE) |
| 4575 | et131x_handle_recv_interrupt(adapter); |
| 4576 | |
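| |	/* 0xffffffd7 is ~0x28, which appears to be
| |	 * ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE) - i.e. clear the
| |	 * two bits handled above.
| |	 */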
| 4577 | status &= 0xffffffd7; |
| 4578 | |
| 4579 | if (status) { |
| 4580 | /* Handle the TXDMA Error interrupt */ |
| 4581 | if (status & ET_INTR_TXDMA_ERR) { |
| 4582 | u32 txdma_err; |
| 4583 | |
| 4584 | /* Following read also clears the register (COR) */ |
| 4585 | txdma_err = readl(&iomem->txdma.tx_dma_error); |
| 4586 | |
| 4587 | dev_warn(&adapter->pdev->dev, |
| 4588 | "TXDMA_ERR interrupt, error = %d\n", |
| 4589 | txdma_err); |
| 4590 | } |
| 4591 | |
| 4592 | /* Handle Free Buffer Ring 0 and 1 Low interrupt */ |
| 4593 | if (status & |
| 4594 | (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { |
| 4595 | /* |
| 4596 | * This indicates the number of unused buffers in |
| 4597 | * RXDMA free buffer ring 0 is <= the limit you |
| 4598 | * programmed. Free buffer resources need to be |
| 4599 | * returned. Free buffers are consumed as packets |
| 4600 | * are passed from the network to the host. The host |
| 4601 | * becomes aware of the packets from the contents of |
| 4602 | * the packet status ring. This ring is queried when |
| 4603 | * the packet done interrupt occurs. Packets are then |
| 4604 | * passed to the OS. When the OS is done with the |
| 4605 | * packets the resources can be returned to the |
| 4606 | * ET1310 for re-use. This interrupt is one method of |
| 4607 | * returning resources. |
| 4608 | */ |
| 4609 | |
| 4610 | /* If the user has flow control on, then we will |
| 4611 | * send a pause packet, otherwise just exit |
| 4612 | */ |
| 4613 | if (adapter->flowcontrol == FLOW_TXONLY || |
| 4614 | adapter->flowcontrol == FLOW_BOTH) { |
| 4615 | u32 pm_csr; |
| 4616 | |
| 4617 | /* Tell the device to send a pause packet via |
| 4618 | * the back pressure register (bp req and |
| 4619 | * bp xon/xoff) |
| 4620 | */ |
| 4621 | pm_csr = readl(&iomem->global.pm_csr); |
| 4622 | if (!et1310_in_phy_coma(adapter)) |
| 4623 | writel(3, &iomem->txmac.bp_ctrl); |
| 4624 | } |
| 4625 | } |
| 4626 | |
| 4627 | /* Handle Packet Status Ring Low Interrupt */ |
| 4628 | if (status & ET_INTR_RXDMA_STAT_LOW) { |
| 4629 | |
| 4630 | /* |
| 4631 | * Same idea as with the two Free Buffer Rings. |
| 4632 | * Packets going from the network to the host each |
| 4633 | * consume a free buffer resource and a packet status |
| 4634 |  * resource. These resources are passed to the OS.
| 4635 | * When the OS is done with the resources, they need |
| 4636 | * to be returned to the ET1310. This is one method |
| 4637 | * of returning the resources. |
| 4638 | */ |
| 4639 | } |
| 4640 | |
| 4641 | /* Handle RXDMA Error Interrupt */ |
| 4642 | if (status & ET_INTR_RXDMA_ERR) { |
| 4643 | /* |
| 4644 | * The rxdma_error interrupt is sent when a time-out |
| 4645 | * on a request issued by the JAGCore has occurred or |
| 4646 | * a completion is returned with an un-successful |
| 4647 | * status. In both cases the request is considered |
| 4648 | * complete. The JAGCore will automatically re-try the |
| 4649 | * request in question. Normally information on events |
| 4650 | * like these are sent to the host using the "Advanced |
| 4651 | * Error Reporting" capability. This interrupt is |
| 4652 | * another way of getting similar information. The |
| 4653 | * only thing required is to clear the interrupt by |
| 4654 | * reading the ISR in the global resources. The |
| 4655 | * JAGCore will do a re-try on the request. Normally |
| 4656 | * you should never see this interrupt. If you start |
| 4657 | * to see this interrupt occurring frequently then |
| 4658 | * something bad has occurred. A reset might be the |
| 4659 | * thing to do. |
| 4660 | */ |
| 4661 | /* TRAP();*/ |
| 4662 | |
| 4663 | dev_warn(&adapter->pdev->dev, |
| 4664 | "RxDMA_ERR interrupt, error %x\n", |
| 4665 | readl(&iomem->txmac.tx_test)); |
| 4666 | } |
| 4667 | |
| 4668 | /* Handle the Wake on LAN Event */ |
| 4669 | if (status & ET_INTR_WOL) { |
| 4670 | /* |
| 4671 | * This is a secondary interrupt for wake on LAN. |
| 4672 | * The driver should never see this, if it does, |
| 4673 | * something serious is wrong. We will TRAP the |
| 4674 | * message when we are in DBG mode, otherwise we |
| 4675 | * will ignore it. |
| 4676 | */ |
| 4677 | dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); |
| 4678 | } |
| 4679 | |
| 4680 | /* Let's move on to the TxMac */ |
| 4681 | if (status & ET_INTR_TXMAC) { |
| 4682 | u32 err = readl(&iomem->txmac.err); |
| 4683 | |
| 4684 | /* |
| 4685 | * When any of the errors occur and TXMAC generates |
| 4686 | * an interrupt to report these errors, it usually |
| 4687 | * means that TXMAC has detected an error in the data |
| 4688 | * stream retrieved from the on-chip Tx Q. All of |
| 4689 | * these errors are catastrophic and TXMAC won't be |
| 4690 | * able to recover data when these errors occur. In |
| 4691 | * a nutshell, the whole Tx path will have to be reset |
| 4692 | * and re-configured afterwards. |
| 4693 | */ |
| 4694 | dev_warn(&adapter->pdev->dev, |
| 4695 | "TXMAC interrupt, error 0x%08x\n", |
| 4696 | err); |
| 4697 | |
| 4698 | /* If we are debugging, we want to see this error, |
| 4699 | * otherwise we just want the device to be reset and |
| 4700 | * continue |
| 4701 | */ |
| 4702 | } |
| 4703 | |
| 4704 | /* Handle RXMAC Interrupt */ |
| 4705 | if (status & ET_INTR_RXMAC) { |
| 4706 | /* |
| 4707 | * These interrupts are catastrophic to the device, |
| 4708 | * what we need to do is disable the interrupts and |
| 4709 | * set the flag to cause us to reset so we can solve |
| 4710 | * this issue. |
| 4711 | */ |
| 4712 | /* MP_SET_FLAG( adapter, |
| 4713 | fMP_ADAPTER_HARDWARE_ERROR); */ |
| 4714 | |
| 4715 | dev_warn(&adapter->pdev->dev, |
| 4716 | "RXMAC interrupt, error 0x%08x. Requesting reset\n", |
| 4717 | readl(&iomem->rxmac.err_reg)); |
| 4718 | |
| 4719 | dev_warn(&adapter->pdev->dev, |
| 4720 | "Enable 0x%08x, Diag 0x%08x\n", |
| 4721 | readl(&iomem->rxmac.ctrl), |
| 4722 | readl(&iomem->rxmac.rxq_diag)); |
| 4723 | |
| 4724 | /* |
| 4725 | * If we are debugging, we want to see this error, |
| 4726 | * otherwise we just want the device to be reset and |
| 4727 | * continue |
| 4728 | */ |
| 4729 | } |
| 4730 | |
| 4731 | /* Handle MAC_STAT Interrupt */ |
| 4732 | if (status & ET_INTR_MAC_STAT) { |
| 4733 | /* |
| 4734 | * This means at least one of the un-masked counters |
| 4735 | * in the MAC_STAT block has rolled over. Use this |
| 4736 | * to maintain the top, software managed bits of the |
| 4737 | * counter(s). |
| 4738 | */ |
| 4739 | et1310_handle_macstat_interrupt(adapter); |
| 4740 | } |
| 4741 | |
| 4742 | /* Handle SLV Timeout Interrupt */ |
| 4743 | if (status & ET_INTR_SLV_TIMEOUT) { |
| 4744 | /* |
| 4745 | * This means a timeout has occurred on a read or |
| 4746 | * write request to one of the JAGCore registers. The |
| 4747 | * Global Resources block has terminated the request |
| 4748 | * and on a read request, returned a "fake" value. |
| 4749 | * The most likely reasons are: Bad Address or the |
| 4750 | * addressed module is in a power-down state and |
| 4751 | * can't respond. |
| 4752 | */ |
| 4753 | } |
| 4754 | } |
| 4755 | et131x_enable_interrupts(adapter); |
| 4756 | } |
| 4757 | |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4758 | /** |
| 4759 | * et131x_stats - Return the current device statistics. |
| 4760 | * @netdev: device whose stats are being queried |
| 4761 | * |
| 4762 |  * Returns a pointer to the device's net_device_stats structure
| 4763 | */ |
| 4764 | static struct net_device_stats *et131x_stats(struct net_device *netdev) |
| 4765 | { |
| 4766 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4767 | struct net_device_stats *stats = &adapter->net_stats; |
| 4768 | struct ce_stats *devstat = &adapter->stats; |
| 4769 | |
| 4770 | stats->rx_errors = devstat->rx_length_errs + |
| 4771 | devstat->rx_align_errs + |
| 4772 | devstat->rx_crc_errs + |
| 4773 | devstat->rx_code_violations + |
| 4774 | devstat->rx_other_errs; |
| 4775 | stats->tx_errors = devstat->tx_max_pkt_errs; |
| 4776 | stats->multicast = devstat->multicast_pkts_rcvd; |
| 4777 | stats->collisions = devstat->tx_collisions; |
| 4778 | |
| 4779 | stats->rx_length_errors = devstat->rx_length_errs; |
| 4780 | stats->rx_over_errors = devstat->rx_overflows; |
| 4781 | stats->rx_crc_errors = devstat->rx_crc_errs; |
| 4782 | |
| 4783 | /* NOTE: These stats don't have corresponding values in CE_STATS, |
| 4784 | * so we're going to have to update these directly from within the |
| 4785 | * TX/RX code |
| 4786 | */ |
| 4787 | /* stats->rx_bytes = 20; devstat->; */ |
| 4788 | /* stats->tx_bytes = 20; devstat->; */ |
| 4789 | /* stats->rx_dropped = devstat->; */ |
| 4790 | /* stats->tx_dropped = devstat->; */ |
| 4791 | |
| 4792 | /* NOTE: Not used, can't find analogous statistics */ |
| 4793 | /* stats->rx_frame_errors = devstat->; */ |
| 4794 | /* stats->rx_fifo_errors = devstat->; */ |
| 4795 | /* stats->rx_missed_errors = devstat->; */ |
| 4796 | |
| 4797 | /* stats->tx_aborted_errors = devstat->; */ |
| 4798 | /* stats->tx_carrier_errors = devstat->; */ |
| 4799 | /* stats->tx_fifo_errors = devstat->; */ |
| 4800 | /* stats->tx_heartbeat_errors = devstat->; */ |
| 4801 | /* stats->tx_window_errors = devstat->; */ |
| 4802 | return stats; |
| 4803 | } |
| 4804 | |
| 4805 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4806 | * et131x_open - Open the device for use. |
| 4807 | * @netdev: device to be opened |
| 4808 | * |
| 4809 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 4810 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4811 | static int et131x_open(struct net_device *netdev) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4812 | { |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4813 | struct et131x_adapter *adapter = netdev_priv(netdev); |
Francois Romieu | 5f3eb88 | 2011-10-23 19:12:01 +0200 | [diff] [blame] | 4814 | struct pci_dev *pdev = adapter->pdev; |
| 4815 | unsigned int irq = pdev->irq; |
| 4816 | int result; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4817 | |
| 4818 | /* Start the timer to track NIC errors */ |
| 4819 | init_timer(&adapter->error_timer); |
| 4820 | adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; |
| 4821 | adapter->error_timer.function = et131x_error_timer_handler; |
| 4822 | adapter->error_timer.data = (unsigned long)adapter; |
| 4823 | add_timer(&adapter->error_timer); |
| 4824 | |
joseph daniel | bf3313a | 2012-05-01 00:30:34 +0600 | [diff] [blame] | 4825 | result = request_irq(irq, et131x_isr, |
| 4826 | IRQF_SHARED, netdev->name, netdev); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4827 | if (result) { |
Francois Romieu | 5f3eb88 | 2011-10-23 19:12:01 +0200 | [diff] [blame] | 4828 | dev_err(&pdev->dev, "could not register IRQ %d\n", irq); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4829 | return result; |
| 4830 | } |
| 4831 | |
| 4832 | adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; |
| 4833 | |
| 4834 | et131x_up(netdev); |
| 4835 | |
| 4836 | return result; |
| 4837 | } |
| 4838 | |
| 4839 | /** |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4840 | * et131x_close - Close the device |
| 4841 | * @netdev: device to be closed |
| 4842 | * |
| 4843 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 4844 | */ |
Francois Romieu | eb7a6ca | 2011-10-23 19:11:02 +0200 | [diff] [blame] | 4845 | static int et131x_close(struct net_device *netdev) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4846 | { |
| 4847 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4848 | |
| 4849 | et131x_down(netdev); |
| 4850 | |
| 4851 | adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; |
Francois Romieu | 5f3eb88 | 2011-10-23 19:12:01 +0200 | [diff] [blame] | 4852 | free_irq(adapter->pdev->irq, netdev); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4853 | |
| 4854 | /* Stop the error timer */ |
| 4855 | return del_timer_sync(&adapter->error_timer); |
| 4856 | } |
| 4857 | |
| 4858 | /** |
| 4859 | * et131x_ioctl - The I/O Control handler for the driver |
| 4860 | * @netdev: device on which the control request is being made |
| 4861 | * @reqbuf: a pointer to the IOCTL request buffer |
| 4862 | * @cmd: the IOCTL command code |
| 4863 | * |
| 4864 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 4865 | */ |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 4866 | static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, |
| 4867 | int cmd) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4868 | { |
| 4869 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 4870 | |
| 4871 | if (!adapter->phydev) |
| 4872 | return -EINVAL; |
| 4873 | |
| 4874 | return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); |
| 4875 | } |
| 4876 | |
| 4877 | /** |
| 4878 | * et131x_set_packet_filter - Configures the Rx Packet filtering on the device |
| 4879 | * @adapter: pointer to our private adapter structure |
| 4880 | * |
| 4881 | * FIXME: lot of dups with MAC code |
| 4882 | * |
| 4883 | * Returns 0 on success, errno on failure |
| 4884 | */ |
| 4885 | static int et131x_set_packet_filter(struct et131x_adapter *adapter) |
| 4886 | { |
Francois Romieu | 834d0ee | 2011-10-23 19:11:19 +0200 | [diff] [blame] | 4887 | int filter = adapter->packet_filter; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4888 | int status = 0; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4889 | u32 ctrl; |
| 4890 | u32 pf_ctrl; |
| 4891 | |
| 4892 | ctrl = readl(&adapter->regs->rxmac.ctrl); |
| 4893 | pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); |
| 4894 | |
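| |	/* In pf_ctrl, bit 0 appears to gate broadcast filtering, bit 1
| |	 * multicast and bit 2 unicast, while bit 2 of rxmac.ctrl (0x04)
| |	 * disables the packet filter altogether - see the assignments below.
| |	 */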
| 4895 | /* Default to disabled packet filtering. Enable it in the individual |
| 4896 | * case statements that require the device to filter something |
| 4897 | */ |
| 4898 | ctrl |= 0x04; |
| 4899 | |
| 4900 | 	/* Set us to be in promiscuous mode so we receive everything; this
| 4901 | 	 * is also true when we get a packet filter of 0
| 4902 | */ |
| 4903 | if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) |
| 4904 | pf_ctrl &= ~7; /* Clear filter bits */ |
| 4905 | else { |
| 4906 | /* |
| 4907 | * Set us up with Multicast packet filtering. Three cases are |
| 4908 | * possible - (1) we have a multi-cast list, (2) we receive ALL |
| 4909 | * multicast entries or (3) we receive none. |
| 4910 | */ |
| 4911 | if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) |
| 4912 | pf_ctrl &= ~2; /* Multicast filter bit */ |
| 4913 | else { |
| 4914 | et1310_setup_device_for_multicast(adapter); |
| 4915 | pf_ctrl |= 2; |
| 4916 | ctrl &= ~0x04; |
| 4917 | } |
| 4918 | |
| 4919 | /* Set us up with Unicast packet filtering */ |
| 4920 | if (filter & ET131X_PACKET_TYPE_DIRECTED) { |
| 4921 | et1310_setup_device_for_unicast(adapter); |
| 4922 | pf_ctrl |= 4; |
| 4923 | ctrl &= ~0x04; |
| 4924 | } |
| 4925 | |
| 4926 | /* Set us up with Broadcast packet filtering */ |
| 4927 | if (filter & ET131X_PACKET_TYPE_BROADCAST) { |
| 4928 | pf_ctrl |= 1; /* Broadcast filter bit */ |
| 4929 | ctrl &= ~0x04; |
| 4930 | } else |
| 4931 | pf_ctrl &= ~1; |
| 4932 | |
| 4933 | /* Setup the receive mac configuration registers - Packet |
| 4934 | * Filter control + the enable / disable for packet filter |
| 4935 | * in the control reg. |
| 4936 | */ |
| 4937 | writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); |
| 4938 | writel(ctrl, &adapter->regs->rxmac.ctrl); |
| 4939 | } |
| 4940 | return status; |
| 4941 | } |
| 4942 | |
| 4943 | /** |
| 4944 | * et131x_multicast - The handler to configure multicasting on the interface |
| 4945 | * @netdev: a pointer to a net_device struct representing the device |
| 4946 | */ |
| 4947 | static void et131x_multicast(struct net_device *netdev) |
| 4948 | { |
| 4949 | struct et131x_adapter *adapter = netdev_priv(netdev); |
Francois Romieu | 834d0ee | 2011-10-23 19:11:19 +0200 | [diff] [blame] | 4950 | int packet_filter; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 4951 | unsigned long flags; |
| 4952 | struct netdev_hw_addr *ha; |
| 4953 | int i; |
| 4954 | |
| 4955 | spin_lock_irqsave(&adapter->lock, flags); |
| 4956 | |
| 4957 | /* Before we modify the platform-independent filter flags, store them |
| 4958 | * locally. This allows us to determine if anything's changed and if |
| 4959 | * we even need to bother the hardware |
| 4960 | */ |
| 4961 | packet_filter = adapter->packet_filter; |
| 4962 | |
| 4963 | /* Clear the 'multicast' flag locally; because we only have a single |
| 4964 | * flag to check multicast, and multiple multicast addresses can be |
| 4965 | * set, this is the easiest way to determine if more than one |
| 4966 | * multicast address is being set. |
| 4967 | */ |
| 4968 | packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; |
| 4969 | |
| 4970 | /* Check the net_device flags and set the device independent flags |
| 4971 | * accordingly |
| 4972 | */ |
| 4973 | |
| 4974 | if (netdev->flags & IFF_PROMISC) |
| 4975 | adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; |
| 4976 | else |
| 4977 | adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; |
| 4978 | |
| 4979 | if (netdev->flags & IFF_ALLMULTI) |
| 4980 | adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; |
| 4981 | |
| 4982 | if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) |
| 4983 | adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; |
| 4984 | |
| 4985 | if (netdev_mc_count(netdev) < 1) { |
| 4986 | adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; |
| 4987 | adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; |
| 4988 | } else |
| 4989 | adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; |
| 4990 | |
| 4991 | /* Set values in the private adapter struct */ |
| 4992 | i = 0; |
| 4993 | netdev_for_each_mc_addr(ha, netdev) { |
| 4994 | if (i == NIC_MAX_MCAST_LIST) |
| 4995 | break; |
| 4996 | memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); |
| 4997 | } |
| 4998 | adapter->multicast_addr_count = i; |
| 4999 | |
| 5000 | /* Are the new flags different from the previous ones? If not, then no |
| 5001 | * action is required |
| 5002 | * |
| 5003 | * NOTE - This block will always update the multicast_list with the |
| 5004 | * hardware, even if the addresses aren't the same. |
| 5005 | */ |
| 5006 | if (packet_filter != adapter->packet_filter) { |
| 5007 | /* Call the device's filter function */ |
| 5008 | et131x_set_packet_filter(adapter); |
| 5009 | } |
| 5010 | spin_unlock_irqrestore(&adapter->lock, flags); |
| 5011 | } |
| 5012 | |
| 5013 | /** |
| 5014 | * et131x_tx - The handler to tx a packet on the device |
| 5015 | * @skb: data to be Tx'd |
| 5016 | * @netdev: device on which data is to be Tx'd |
| 5017 | * |
| 5018 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 5019 | */ |
| 5020 | static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) |
| 5021 | { |
| 5022 | int status = 0; |
Mark Einon | 06709e9 | 2011-10-20 01:18:46 +0100 | [diff] [blame] | 5023 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 5024 | |
| 5025 | /* stop the queue if it's getting full */ |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 5026 | if (adapter->tx_ring.used >= NUM_TCB - 1 && |
| 5027 | !netif_queue_stopped(netdev)) |
Mark Einon | 06709e9 | 2011-10-20 01:18:46 +0100 | [diff] [blame] | 5028 | netif_stop_queue(netdev); |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 5029 | |
| 5030 | /* Save the timestamp for the TX timeout watchdog */ |
| 5031 | netdev->trans_start = jiffies; |
| 5032 | |
| 5033 | /* Call the device-specific data Tx routine */ |
| 5034 | status = et131x_send_packets(skb, netdev); |
| 5035 | |
| 5036 | /* Check status and manage the netif queue if necessary */ |
| 5037 | if (status != 0) { |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 5038 | if (status == -ENOMEM) |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 5039 | status = NETDEV_TX_BUSY; |
Mark Einon | 09a3fc2 | 2011-10-23 10:22:53 +0100 | [diff] [blame] | 5040 | else |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 5041 | status = NETDEV_TX_OK; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 5042 | } |
| 5043 | return status; |
| 5044 | } |
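/* Flow-control sketch, assuming the usual netif queue pattern: the queue
 * stopped above once tx_ring.used approaches NUM_TCB is expected to be
 * restarted from the TX completion path when enough TCBs have been
 * reclaimed; the threshold below is illustrative only:
 *
 *	if (netif_queue_stopped(netdev) &&
 *	    adapter->tx_ring.used < NUM_TCB - 1)
 *		netif_wake_queue(netdev);
 */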
| 5045 | |
| 5046 | /** |
| 5047 | * et131x_tx_timeout - Timeout handler |
| 5048 | * @netdev: a pointer to a net_device struct representing the device |
| 5049 | * |
| 5050 | * The handler called when a Tx request times out. The timeout period is |
| 5051 | * specified by the 'watchdog_timeo' element in the net_device structure |
| 5052 | * (see et131x_pci_setup() to see how this value is set). |
| 5053 | */ |
| 5054 | static void et131x_tx_timeout(struct net_device *netdev) |
| 5055 | { |
| 5056 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 5057 | struct tcb *tcb; |
| 5058 | unsigned long flags; |
| 5059 | |
| 5060 | /* If the device is closed, ignore the timeout */ |
| 5061 | if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) |
| 5062 | return; |
| 5063 | |
| 5064 | /* Any nonrecoverable hardware error? |
| 5065 | * Checks adapter->flags for any failure in phy reading |
| 5066 | */ |
| 5067 | if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) |
| 5068 | return; |
| 5069 | |
| 5070 | /* Hardware failure? */ |
| 5071 | if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { |
| 5072 | dev_err(&adapter->pdev->dev, "hardware error - reset\n"); |
| 5073 | return; |
| 5074 | } |
| 5075 | |
| 5076 | /* Is send stuck? */ |
| 5077 | spin_lock_irqsave(&adapter->tcb_send_qlock, flags); |
| 5078 | |
| 5079 | tcb = adapter->tx_ring.send_head; |
| 5080 | |
| 5081 | if (tcb != NULL) { |
| 5082 | tcb->count++; |
| 5083 | |
| 5084 | if (tcb->count > NIC_SEND_HANG_THRESHOLD) { |
| 5085 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, |
| 5086 | flags); |
| 5087 | |
| 5088 | dev_warn(&adapter->pdev->dev, |
| 5089 | "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", |
| 5090 | tcb->index, |
| 5091 | tcb->flags); |
| 5092 | |
| 5093 | adapter->net_stats.tx_errors++; |
| 5094 | |
| 5095 | /* perform reset of tx/rx */ |
| 5096 | et131x_disable_txrx(netdev); |
| 5097 | et131x_enable_txrx(netdev); |
| 5098 | return; |
| 5099 | } |
| 5100 | } |
| 5101 | |
| 5102 | spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); |
| 5103 | } |
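/* Context sketch: the networking core calls .ndo_tx_timeout (this handler)
 * when a stopped TX queue has made no progress for longer than the watchdog
 * period configured in et131x_pci_setup() below:
 *
 *	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
 */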
| 5104 | |
| 5105 | /** |
| 5106 | * et131x_change_mtu - The handler called to change the MTU for the device |
| 5107 | * @netdev: device whose MTU is to be changed |
| 5108 | * @new_mtu: the desired MTU |
| 5109 | * |
| 5110 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 5111 | */ |
| 5112 | static int et131x_change_mtu(struct net_device *netdev, int new_mtu) |
| 5113 | { |
| 5114 | int result = 0; |
| 5115 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 5116 | |
| 5117 | /* Make sure the requested MTU is valid */ |
| 5118 | if (new_mtu < 64 || new_mtu > 9216) |
| 5119 | return -EINVAL; |
| 5120 | |
| 5121 | et131x_disable_txrx(netdev); |
| 5122 | et131x_handle_send_interrupt(adapter); |
| 5123 | et131x_handle_recv_interrupt(adapter); |
| 5124 | |
| 5125 | /* Set the new MTU */ |
| 5126 | netdev->mtu = new_mtu; |
| 5127 | |
| 5128 | /* Free Rx DMA memory */ |
| 5129 | et131x_adapter_memory_free(adapter); |
| 5130 | |
| 5131 | /* Set the config parameter for Jumbo Packet support */ |
| 5132 | adapter->registry_jumbo_packet = new_mtu + 14; |
| 5133 | et131x_soft_reset(adapter); |
| 5134 | |
| 5135 | /* Alloc and init Rx DMA memory */ |
| 5136 | result = et131x_adapter_memory_alloc(adapter); |
| 5137 | if (result != 0) { |
| 5138 | dev_warn(&adapter->pdev->dev, |
| 5139 | "Change MTU failed; couldn't re-alloc DMA memory\n"); |
| 5140 | return result; |
| 5141 | } |
| 5142 | |
| 5143 | et131x_init_send(adapter); |
| 5144 | |
| 5145 | et131x_hwaddr_init(adapter); |
| 5146 | memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); |
| 5147 | |
| 5148 | /* Init the device with the new settings */ |
| 5149 | et131x_adapter_setup(adapter); |
| 5150 | |
| 5151 | et131x_enable_txrx(netdev); |
| 5152 | |
| 5153 | return result; |
| 5154 | } |
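/* Worked example of the frame-size arithmetic above: the '+ 14' accounts for
 * the Ethernet header (ETH_HLEN), so for instance:
 *
 *	new_mtu = 1500  ->  registry_jumbo_packet = 1514  (standard frame)
 *	new_mtu = 9216  ->  registry_jumbo_packet = 9230  (jumbo frame)
 */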
| 5155 | |
| 5156 | /** |
| 5157 | * et131x_set_mac_addr - handler to change the MAC address for the device |
| 5158 | * @netdev: device whose MAC is to be changed |
| 5159 | * @new_mac: the desired MAC address |
| 5160 | * |
| 5161 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 5162 | * |
| 5163 | * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 |
| 5164 | */ |
| 5165 | static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) |
| 5166 | { |
| 5167 | int result = 0; |
| 5168 | struct et131x_adapter *adapter = netdev_priv(netdev); |
| 5169 | struct sockaddr *address = new_mac; |
| 5170 | |
| 5172 | |
| 5173 | if (adapter == NULL) |
| 5174 | return -ENODEV; |
| 5175 | |
| 5176 | /* Make sure the requested MAC is valid */ |
| 5177 | if (!is_valid_ether_addr(address->sa_data)) |
Danny Kukawka | d8aa3e2 | 2012-02-21 13:07:51 +0100 | [diff] [blame] | 5178 | return -EADDRNOTAVAIL; |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 5179 | |
| 5180 | et131x_disable_txrx(netdev); |
| 5181 | et131x_handle_send_interrupt(adapter); |
| 5182 | et131x_handle_recv_interrupt(adapter); |
| 5183 | |
| 5184 | /* Set the new MAC */ |
| 5187 | memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); |
| 5188 | |
| 5189 | netdev_info(netdev, "Setting MAC address to %pM\n", |
| 5190 | netdev->dev_addr); |
| 5191 | |
| 5192 | /* Free Rx DMA memory */ |
| 5193 | et131x_adapter_memory_free(adapter); |
| 5194 | |
| 5195 | et131x_soft_reset(adapter); |
| 5196 | |
| 5197 | /* Alloc and init Rx DMA memory */ |
| 5198 | result = et131x_adapter_memory_alloc(adapter); |
| 5199 | if (result != 0) { |
| 5200 | dev_err(&adapter->pdev->dev, |
| 5201 | "Change MAC failed; couldn't re-alloc DMA memory\n"); |
| 5202 | return result; |
| 5203 | } |
| 5204 | |
| 5205 | et131x_init_send(adapter); |
| 5206 | |
| 5207 | et131x_hwaddr_init(adapter); |
| 5208 | |
| 5209 | /* Init the device with the new settings */ |
| 5210 | et131x_adapter_setup(adapter); |
| 5211 | |
| 5212 | et131x_enable_txrx(netdev); |
| 5213 | |
| 5214 | return result; |
| 5215 | } |
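/* Usage sketch: this handler is reached through .ndo_set_mac_address when
 * user space requests an address change, for example (interface name is
 * illustrative):
 *
 *	ip link set dev eth0 address 00:11:22:33:44:55
 *
 * 'new_mac' then points at a struct sockaddr whose sa_data carries the six
 * address bytes checked above with is_valid_ether_addr().
 */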
| 5216 | |
| 5217 | static const struct net_device_ops et131x_netdev_ops = { |
| 5218 | .ndo_open = et131x_open, |
| 5219 | .ndo_stop = et131x_close, |
| 5220 | .ndo_start_xmit = et131x_tx, |
Linus Torvalds | aa77677 | 2011-10-26 15:39:02 +0200 | [diff] [blame] | 5221 | .ndo_set_rx_mode = et131x_multicast, |
Mark Einon | d279674 | 2011-10-20 01:18:30 +0100 | [diff] [blame] | 5222 | .ndo_tx_timeout = et131x_tx_timeout, |
| 5223 | .ndo_change_mtu = et131x_change_mtu, |
| 5224 | .ndo_set_mac_address = et131x_set_mac_addr, |
| 5225 | .ndo_validate_addr = eth_validate_addr, |
| 5226 | .ndo_get_stats = et131x_stats, |
| 5227 | .ndo_do_ioctl = et131x_ioctl, |
| 5228 | }; |
| 5229 | |
| 5230 | /** |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5231 | * et131x_pci_setup - Perform device initialization |
| 5232 | * @pdev: a pointer to the device's pci_dev structure |
| 5233 | * @ent: this device's entry in the pci_device_id table |
| 5234 | * |
| 5235 | * Returns 0 on success, errno on failure (as defined in errno.h) |
| 5236 | * |
| 5237 | * Registered in the pci_driver structure, this function is called when the |
| 5238 | * PCI subsystem finds a new PCI device which matches the information |
| 5239 | * contained in the pci_device_id table. This routine is the equivalent of |
| 5240 | * a device insertion routine. |
| 5241 | */ |
| 5242 | static int __devinit et131x_pci_setup(struct pci_dev *pdev, |
| 5243 | const struct pci_device_id *ent) |
| 5244 | { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5245 | struct net_device *netdev; |
| 5246 | struct et131x_adapter *adapter; |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5247 | int rc; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5248 | int ii; |
| 5249 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5250 | rc = pci_enable_device(pdev); |
| 5251 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5252 | dev_err(&pdev->dev, "pci_enable_device() failed\n"); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5253 | goto out; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5254 | } |
| 5255 | |
| 5256 | /* Perform some basic PCI checks */ |
| 5257 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
| 5258 | dev_err(&pdev->dev, "Can't find PCI device's base address\n"); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5259 | rc = -ENODEV; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5260 | goto err_disable; |
| 5261 | } |
| 5262 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5263 | rc = pci_request_regions(pdev, DRIVER_NAME); |
| 5264 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5265 | dev_err(&pdev->dev, "Can't get PCI resources\n"); |
| 5266 | goto err_disable; |
| 5267 | } |
| 5268 | |
| 5269 | pci_set_master(pdev); |
| 5270 | |
| 5271 | /* Check the DMA addressing support of this device */ |
| 5272 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5273 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); |
| 5274 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5275 | dev_err(&pdev->dev, |
| 5276 | "Unable to obtain 64 bit DMA for consistent allocations\n"); |
| 5277 | goto err_release_res; |
| 5278 | } |
| 5279 | } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5280 | rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); |
| 5281 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5282 | dev_err(&pdev->dev, |
| 5283 | "Unable to obtain 32 bit DMA for consistent allocations\n"); |
| 5284 | goto err_release_res; |
| 5285 | } |
| 5286 | } else { |
| 5287 | dev_err(&pdev->dev, "No usable DMA addressing method\n"); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5288 | rc = -EIO; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5289 | goto err_release_res; |
| 5290 | } |
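/* Sketch: on kernels that provide dma_set_mask_and_coherent(), the
 * streaming/coherent mask fallback above could be collapsed to roughly:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 *		dev_err(&pdev->dev, "No usable DMA addressing method\n");
 *		rc = -EIO;
 *		goto err_release_res;
 *	}
 */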
| 5291 | |
| 5292 | /* Allocate netdev and private adapter structs */ |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5293 | netdev = alloc_etherdev(sizeof(struct et131x_adapter)); |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5294 | if (!netdev) { |
| 5295 | dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5296 | rc = -ENOMEM; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5297 | goto err_release_res; |
| 5298 | } |
| 5299 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5300 | netdev->watchdog_timeo = ET131X_TX_TIMEOUT; |
| 5301 | netdev->netdev_ops = &et131x_netdev_ops; |
| 5302 | |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5303 | SET_NETDEV_DEV(netdev, &pdev->dev); |
| 5304 | et131x_set_ethtool_ops(netdev); |
| 5305 | |
| 5306 | adapter = et131x_adapter_init(netdev, pdev); |
| 5307 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5308 | rc = et131x_pci_init(adapter, pdev); |
| 5309 | if (rc < 0) |
| 5310 | goto err_free_dev; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5311 | |
| 5312 | /* Map the bus-relative registers to system virtual memory */ |
| 5313 | adapter->regs = pci_ioremap_bar(pdev, 0); |
| 5314 | if (!adapter->regs) { |
| 5315 | dev_err(&pdev->dev, "Cannot map device registers\n"); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5316 | rc = -ENOMEM; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5317 | goto err_free_dev; |
| 5318 | } |
| 5319 | |
| 5320 | /* If Phy COMA mode was enabled when we went down, disable it here. */ |
| 5321 | writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); |
| 5322 | |
| 5323 | /* Issue a global reset to the et1310 */ |
| 5324 | et131x_soft_reset(adapter); |
| 5325 | |
| 5326 | /* Disable all interrupts (paranoid) */ |
| 5327 | et131x_disable_interrupts(adapter); |
| 5328 | |
| 5329 | /* Allocate DMA memory */ |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5330 | rc = et131x_adapter_memory_alloc(adapter); |
| 5331 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5332 | dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n"); |
| 5333 | goto err_iounmap; |
| 5334 | } |
| 5335 | |
| 5336 | /* Init send data structures */ |
| 5337 | et131x_init_send(adapter); |
| 5338 | |
| 5339 | /* Set up the task structure for the ISR's deferred handler */ |
| 5340 | INIT_WORK(&adapter->task, et131x_isr_handler); |
| 5341 | |
| 5342 | /* Copy address into the net_device struct */ |
| 5343 | memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); |
| 5344 | |
| 5345 | /* Init variable for counting how long we do not have link status */ |
| 5346 | adapter->boot_coma = 0; |
| 5347 | et1310_disable_phy_coma(adapter); |
| 5348 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5349 | rc = -ENOMEM; |
| 5350 | |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5351 | /* Setup the mii_bus struct */ |
| 5352 | adapter->mii_bus = mdiobus_alloc(); |
| 5353 | if (!adapter->mii_bus) { |
| 5354 | dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); |
| 5355 | goto err_mem_free; |
| 5356 | } |
| 5357 | |
| 5358 | adapter->mii_bus->name = "et131x_eth_mii"; |
| 5359 | snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", |
| 5360 | (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); |
| 5361 | adapter->mii_bus->priv = netdev; |
| 5362 | adapter->mii_bus->read = et131x_mdio_read; |
| 5363 | adapter->mii_bus->write = et131x_mdio_write; |
| 5364 | adapter->mii_bus->reset = et131x_mdio_reset; |
| 5365 | adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); |
| 5366 | if (!adapter->mii_bus->irq) { |
| 5367 | dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); |
| 5368 | goto err_mdio_free; |
| 5369 | } |
| 5370 | |
| 5371 | for (ii = 0; ii < PHY_MAX_ADDR; ii++) |
| 5372 | adapter->mii_bus->irq[ii] = PHY_POLL; |
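/* Note: PHY_POLL in each irq slot above tells the PHY layer (phylib) to poll
 * the PHY for link changes instead of expecting a dedicated interrupt line.
 */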
| 5373 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5374 | rc = mdiobus_register(adapter->mii_bus); |
| 5375 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5376 | dev_err(&pdev->dev, "failed to register MII bus\n"); |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5377 | goto err_mdio_free_irq; |
| 5378 | } |
| 5379 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5380 | rc = et131x_mii_probe(netdev); |
| 5381 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5382 | dev_err(&pdev->dev, "failed to probe MII bus\n"); |
| 5383 | goto err_mdio_unregister; |
| 5384 | } |
| 5385 | |
| 5386 | /* Setup et1310 as per the documentation */ |
| 5387 | et131x_adapter_setup(adapter); |
| 5388 | |
| 5389 | /* We can enable interrupts now |
| 5390 | * |
| 5391 | * NOTE - Because registration of interrupt handler is done in the |
| 5392 | * device's open(), defer enabling device interrupts to that |
| 5393 | * point |
| 5394 | */ |
| 5395 | |
| 5396 | /* Register the net_device struct with the Linux network layer */ |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5397 | rc = register_netdev(netdev); |
| 5398 | if (rc < 0) { |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5399 | dev_err(&pdev->dev, "register_netdev() failed\n"); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5400 | goto err_phy_disconnect; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5401 | } |
| 5402 | |
| 5403 | /* Stash the net_device pointer in the PCI device's driver data so the |
| 5404 | * other driver entry points (remove, suspend/resume) can retrieve the |
| 5405 | * adapter from the pci_dev later. |
| 5406 | */ |
| 5407 | pci_set_drvdata(pdev, netdev); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5408 | out: |
| 5409 | return rc; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5410 | |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5411 | err_phy_disconnect: |
| 5412 | phy_disconnect(adapter->phydev); |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5413 | err_mdio_unregister: |
| 5414 | mdiobus_unregister(adapter->mii_bus); |
| 5415 | err_mdio_free_irq: |
| 5416 | kfree(adapter->mii_bus->irq); |
| 5417 | err_mdio_free: |
| 5418 | mdiobus_free(adapter->mii_bus); |
| 5419 | err_mem_free: |
| 5420 | et131x_adapter_memory_free(adapter); |
| 5421 | err_iounmap: |
| 5422 | iounmap(adapter->regs); |
| 5423 | err_free_dev: |
| 5424 | pci_dev_put(pdev); |
| 5425 | free_netdev(netdev); |
| 5426 | err_release_res: |
| 5427 | pci_release_regions(pdev); |
| 5428 | err_disable: |
| 5429 | pci_disable_device(pdev); |
Francois Romieu | fa9f0a6 | 2011-10-23 19:11:35 +0200 | [diff] [blame] | 5430 | goto out; |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5431 | } |
| 5432 | |
Mark Einon | 5da2b15 | 2011-10-23 10:22:49 +0100 | [diff] [blame] | 5433 | static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = { |
| 5434 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL}, |
| 5435 | { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL}, |
| 5436 | {0,} |
| 5437 | }; |
| 5438 | MODULE_DEVICE_TABLE(pci, et131x_pci_table); |
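/* Note: MODULE_DEVICE_TABLE(pci, ...) exports the IDs above as module aliases
 * so that udev/modprobe can autoload this driver when a matching PCI device
 * is discovered.
 */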
| 5439 | |
| 5440 | static struct pci_driver et131x_driver = { |
| 5441 | .name = DRIVER_NAME, |
| 5442 | .id_table = et131x_pci_table, |
| 5443 | .probe = et131x_pci_setup, |
| 5444 | .remove = __devexit_p(et131x_pci_remove), |
| 5445 | .driver.pm = ET131X_PM_OPS, |
| 5446 | }; |
| 5447 | |
Devendra Naga | 89812b1 | 2012-07-10 12:11:03 +0530 | [diff] [blame^] | 5448 | module_pci_driver(et131x_driver); |
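/* module_pci_driver() above generates the module init/exit boilerplate; it
 * expands to roughly the following (sketch):
 *
 *	static int __init et131x_driver_init(void)
 *	{
 *		return pci_register_driver(&et131x_driver);
 *	}
 *	module_init(et131x_driver_init);
 *
 *	static void __exit et131x_driver_exit(void)
 *	{
 *		pci_unregister_driver(&et131x_driver);
 *	}
 *	module_exit(et131x_driver_exit);
 */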