/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>
#include "alx.h"
#include "hw.h"
#include "reg.h"

const char alx_drv_name[] = "alx";

static bool msix = false;
module_param(msix, bool, 0);
MODULE_PARM_DESC(msix, "Enable msi-x interrupt support");

static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
	struct alx_buffer *txb = &alx->txq.bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

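/* Allocate and DMA-map receive buffers for all free RFD slots, stopping
 * when the write index would catch up with the read index.  The producer
 * index is only published to hardware (ALX_RFD_PIDX) after a write
 * barrier, so the chip never sees a descriptor before its address field
 * is visible.
 */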
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/*
		 * When the RX DMA address ends in 0x...fc0, it is very
		 * likely to trigger a DMA RFD overflow in the hardware.
		 *
		 * Work around this by allocating the RX skb with 64 bytes
		 * of extra space and offsetting the start address whenever
		 * an 0x...fc0 address is detected.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

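/* Number of free TPD slots in the TX ring.  One slot is always left
 * unused so that a full ring (write_idx one behind read_idx) can be
 * distinguished from an empty one (write_idx == read_idx); e.g. with
 * tx_ringsz = 256, read_idx = 10 and write_idx = 9, this returns 0.
 */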
static inline int alx_tpd_avail(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;

	if (txq->write_idx >= txq->read_idx)
		return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

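/* Reclaim completed TX descriptors.  The hardware advances its consumer
 * index (ALX_TPD_PRI0_CIDX) as it finishes transmitting; everything
 * between the software read index and that point can be unmapped and
 * freed.  Completions are reported to BQL via netdev_completed_queue(),
 * and the queue is woken once at least a quarter of the ring is free.
 */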
static bool alx_clean_tx_irq(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(alx, sw_read_idx);

			if (++sw_read_idx == alx->tx_ringsz)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_completed_queue(alx->dev, total_packets, total_bytes);
	}

	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(alx) > alx->tx_ringsz/4)
		netif_wake_queue(alx->dev);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

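/* RX completion path.  The chip consumes buffers posted through the RFD
 * ring and, for each received packet, writes back an RRD entry carrying
 * status, length and checksum information.  Walk the RRD ring until the
 * budget is exhausted or an entry without the UPDATED bit is found, hand
 * good packets to napi_gro_receive(), and refill RFDs in batches of
 * ALX_RX_ALLOC_THRESH.
 */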
static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(&alx->hw.pdev->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, alx->dev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&alx->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == alx->rx_ringsz)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == alx->rx_ringsz)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}

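/* NAPI poll callback.  Per the NAPI contract, returning the full budget
 * keeps polling scheduled; only when both TX and RX work completed below
 * budget is napi_complete() called and the queue interrupts re-enabled
 * (per-vector for MSI-X, via the shared IMR otherwise).
 */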
static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete;
	int work;

	tx_complete = alx_clean_tx_irq(alx);
	work = alx_clean_rx_irq(alx, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete(&alx->napi);

	/* enable interrupt */
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_mask_msix(hw, 1, false);
	} else {
		spin_lock_irqsave(&alx->irq_lock, flags);
		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		spin_unlock_irqrestore(&alx->irq_lock, flags);
	}

	alx_post_write(hw);

	return work;
}

static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		return true;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* Mask the PHY interrupt here: its source is internal to
		 * the PHY, and the interrupt status can only be cleared
		 * once the PHY's internal status has been cleared by the
		 * scheduled link check.
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		alx_schedule_link_check(alx);
	}

	return false;
}

static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (alx_intr_handle_misc(alx, intr))
		goto out;

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->napi);
		/* mask rx/tx interrupt, enable them when napi complete */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	}

	alx_write_mem32(hw, ALX_ISR, 0);

 out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

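/* MSI-X layout used by this driver: vector 0 carries the misc/status
 * interrupts (PHY, fatal errors, alerts), vector 1 carries TX and RX for
 * the single queue pair.  Each handler masks its own vector to ACK the
 * chip and re-enables it once the work has been handled.
 */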
static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 1, true);
	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));

	napi_schedule(&alx->napi);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_misc(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 0, true);

	/* read interrupt status */
	intr = alx_read_mem32(hw, ALX_ISR);
	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);

	if (alx_intr_handle_misc(alx, intr))
		return IRQ_HANDLED;

	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, intr);

	/* enable interrupt again */
	alx_mask_msix(hw, 0, false);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;

	alx->rxq.read_idx = 0;
	alx->rxq.write_idx = 0;
	alx->rxq.rrd_read_idx = 0;
	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	alx->txq.read_idx = 0;
	alx->txq.write_idx = 0;
	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_priv *alx)
{
	struct alx_tx_queue *txq = &alx->txq;
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < alx->tx_ringsz; i++)
		alx_free_txbuf(alx, i);

	memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_reset_queue(alx->dev);
}

static void alx_free_rxring_buf(struct alx_priv *alx)
{
	struct alx_rx_queue *rxq = &alx->rxq;
	struct alx_buffer *cur_buf;
	u16 i;

	if (rxq == NULL)
		return;

	for (i = 0; i < alx->rx_ringsz; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(&alx->hw.pdev->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	alx_free_txring_buf(alx);
	alx_free_rxring_buf(alx);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

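/* The hardware implements a 64-bit multicast hash filter split across two
 * 32-bit registers.  The top bit of the Ethernet CRC of the address picks
 * the register; the next five bits pick the bit within it.
 */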
static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

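/* The TX and RX descriptor rings are carved out of the single coherent
 * allocation in alx->descmem; each helper takes the current byte offset
 * into that block and returns the offset to use for the next ring (or a
 * negative errno), so all descriptors stay within one 4 GB region.
 */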
static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
			     int offset)
{
	txq->bufs = kcalloc(alx->tx_ringsz, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	txq->tpd = alx->descmem.virt + offset;
	txq->tpd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_txd) * alx->tx_ringsz;

	return offset;
}

static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
			     int offset)
{
	rxq->bufs = kcalloc(alx->rx_ringsz, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!rxq->bufs)
		return -ENOMEM;

	rxq->rrd = alx->descmem.virt + offset;
	rxq->rrd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rrd) * alx->rx_ringsz;

	rxq->rfd = alx->descmem.virt + offset;
	rxq->rfd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rfd) * alx->rx_ringsz;

	return offset;
}

static int alx_alloc_rings(struct alx_priv *alx)
{
	int offset = 0;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		return -ENOMEM;

	/* alignment requirements */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	offset = alx_alloc_tx_ring(alx, &alx->txq, offset);
	if (offset < 0) {
		netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
		return -ENOMEM;
	}

	offset = alx_alloc_rx_ring(alx, &alx->rxq, offset);
	if (offset < 0) {
		netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
		return -ENOMEM;
	}

	alx_reinit_rings(alx);

	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	kfree(alx->txq.bufs);
	kfree(alx->rxq.bufs);

	if (alx->descmem.virt)
		dma_free_coherent(&alx->hw.pdev->dev,
				  alx->descmem.size,
				  alx->descmem.virt,
				  alx->descmem.dma);
}

static void alx_free_napis(struct alx_priv *alx)
{
	struct alx_napi *np;

	np = alx->qnapi[0];
	if (!np)
		return;

	netif_napi_del(&alx->napi);
	kfree(np->txq);
	kfree(np->rxq);
	kfree(np);
	alx->qnapi[0] = NULL;
}

static int alx_alloc_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	struct alx_rx_queue *rxq;
	struct alx_tx_queue *txq;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;

	/* allocate alx_napi structures */
	np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
	if (!np)
		goto err_out;

	np->alx = alx;
	netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
	alx->qnapi[0] = np;

	/* allocate tx queues */
	np = alx->qnapi[0];
	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		goto err_out;

	np->txq = txq;
	txq->count = alx->tx_ringsz;
	txq->netdev = alx->dev;
	txq->dev = &alx->hw.pdev->dev;

	/* allocate rx queues */
	np = alx->qnapi[0];
	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
	if (!rxq)
		goto err_out;

	np->rxq = rxq;
	rxq->np = alx->qnapi[0];
	rxq->count = alx->rx_ringsz;
	rxq->netdev = alx->dev;
	rxq->dev = &alx->hw.pdev->dev;

	return 0;

err_out:
	netdev_err(alx->dev, "error allocating internal structures\n");
	alx_free_napis(alx);
	return -ENOMEM;
}

static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 tbl = 0;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
		tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
	}

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

static bool alx_enable_msix(struct alx_priv *alx)
{
	int i, err, num_vec = 2;

	alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
				    GFP_KERNEL);
	if (!alx->msix_entries) {
		netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
		return false;
	}

	for (i = 0; i < num_vec; i++)
		alx->msix_entries[i].entry = i;

	err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
	if (err) {
		kfree(alx->msix_entries);
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return false;
	}

	alx->num_vec = num_vec;
	return true;
}

static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	vector++;
	sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);

	err = request_irq(alx->msix_entries[vector].vector,
			  alx_intr_msix_ring, 0, alx->irq_lbl, alx);
	if (err)
		goto out_free;

	return 0;

out_free:
	free_irq(alx->msix_entries[free_vector++].vector, alx);

	vector--;
	for (i = 0; i < vector; i++)
		free_irq(alx->msix_entries[free_vector++].vector, alx);

out_err:
	return err;
}

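/* Interrupt setup tries the mechanisms in decreasing order of preference:
 * MSI-X (only if the module parameter asks for it), then MSI, then the
 * legacy INTx line.  alx_request_irq() below falls back the same way if
 * requesting the MSI-X vectors fails after they were enabled.
 */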
static void alx_init_intr(struct alx_priv *alx, bool msix)
{
	if (msix) {
		if (alx_enable_msix(alx))
			alx->flags |= ALX_FLAG_USING_MSIX;
	}

	if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
		alx->num_vec = 1;

		if (!pci_enable_msi(alx->hw.pdev))
			alx->flags |= ALX_FLAG_USING_MSI;
	}
}

static void alx_disable_advanced_intr(struct alx_priv *alx)
{
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		kfree(alx->msix_entries);
		pci_disable_msix(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSIX;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		pci_disable_msi(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSI;
	}
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX)
		/* enable all msix irqs */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			synchronize_irq(alx->msix_entries[i].vector);
		}
	} else {
		synchronize_irq(alx->hw.pdev->irq);
	}
}

static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* msix request failed, realloc resources */
		alx_disable_advanced_intr(alx);
		alx_init_intr(alx, false);
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;
		/* fall back to legacy interrupt */
		alx->flags &= ~ALX_FLAG_USING_MSI;
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	else
		netdev_err(alx->dev, "IRQ registration failed!\n");
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		/* we have only 2 vectors without multi queue support */
		for (i = 0; i < 2; i++)
			free_irq(alx->msix_entries[i].vector, alx);
	} else {
		free_irq(pdev->irq, alx);
	}

	alx_disable_advanced_intr(alx);
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	/* MTU range: 34 - 9256 */
	alx->dev->min_mtu = 34;
	alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}


static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		napi_disable(&alx->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware setting lost, restore it */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

static void alx_netif_start(struct alx_priv *alx)
{
	netif_tx_wake_all_queues(alx->dev);
	napi_enable(&alx->napi);
	netif_carrier_on(alx->dev);
}

static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	alx_init_intr(alx, msix);

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_napis(alx);
	if (err)
		goto out_disable_adv_intr;

	err = alx_alloc_rings(alx);
	if (err)
		goto out_free_rings;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	alx_free_napis(alx);
out_disable_adv_intr:
	alx_disable_advanced_intr(alx);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
	alx_free_napis(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

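/* Link state machine, run from the link_check work item.  On link-up the
 * MAC is started and the netdev brought up; on link-down the MAC has to
 * be fully reset, which loses all hardware state, so the rings and
 * configuration are restored before interrupts are re-enabled.  Any
 * failure along the way schedules a full reset.
 */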
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

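/* Number of TPDs a given skb will consume: one for the linear head plus
 * one per page fragment, plus one extra descriptor when LSOv2 (TSO over
 * IPv6) is in use, since the first TPD then only carries the total
 * packet length.
 */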
static int alx_tpd_req(struct sk_buff *skb)
{
	int num;

	num = skb_shinfo(skb)->nr_frags + 1;
	/* we need one extra descriptor for LSOv2 */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		num++;

	return num;
}

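/* Set up checksum offload for a CHECKSUM_PARTIAL skb.  The ">> 1" shifts
 * suggest the hardware takes the checksum start and result offsets in
 * 16-bit-word units, so an odd checksum start offset cannot be expressed
 * and is rejected.
 */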
static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only provides the packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}

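/* DMA-map the skb head and all page fragments into consecutive TPDs.  For
 * LSOv2 the first TPD was already filled with just the packet length, so
 * a second TPD inheriting its fields is started before any buffers are
 * mapped.  The final descriptor gets the EOP bit and owns the skb
 * pointer; on a mapping error everything from first_idx onward is
 * unwound.
 */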
static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
{
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == alx->tx_ringsz)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == alx->tx_ringsz)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(alx, f);
		if (++f == alx->tx_ringsz)
			f = 0;
	}
	return -ENOMEM;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_tx_queue *txq = &alx->txq;
	struct alx_txd *first;
	int tso;

	if (alx_tpd_avail(alx) < alx_tpd_req(skb)) {
		netif_stop_queue(alx->dev);
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(alx, skb) < 0)
		goto drop;

	netdev_sent_queue(alx->dev, skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);

	if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
		netif_stop_queue(alx->dev);

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_intr_msix_misc(0, alx);
		alx_intr_msix_ring(0, alx);
	} else if (alx->flags & ALX_FLAG_USING_MSI)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

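/* Fold the hardware counters into the rtnl stats.  alx_update_hw_stats()
 * accumulates the chip's MIB counters into alx->hw.stats; stats_lock
 * serializes this against other readers of the accumulated counters
 * (e.g. the ethtool statistics path).
 */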
static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes = hw_stats->rx_byte_cnt;
	net_stats->multicast = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors = hw_stats->rx_frag +
			       hw_stats->rx_fcs_err +
			       hw_stats->rx_len_err +
			       hw_stats->rx_ov_sz +
			       hw_stats->rx_ov_rrd +
			       hw_stats->rx_align_err +
			       hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors = hw_stats->rx_align_err;
	net_stats->rx_dropped = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors = hw_stats->tx_underrun;
	net_stats->tx_window_errors = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);

	return net_stats;
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};

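/* PCI probe: enable the device, set the DMA mask, map BAR 0, bring the
 * PHY and MAC into a known good state and register the netdev.  Interrupt
 * vectors are not set up here; they are chosen and requested when the
 * interface is opened.
 */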
static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	err = pci_request_mem_regions(pdev, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed\n");
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev(sizeof(*alx));
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_HW_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif


static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};

static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");