/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>

#include "xgbe.h"
#include "xgbe-common.h"


static int xgbe_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);

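/* Number of Tx descriptors still available for use on the ring
 * (ring->cur - ring->dirty is the count of descriptors in flight).
 */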
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

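/* Derive the Rx buffer size from the MTU: reserve room for the Ethernet
 * header, FCS and a VLAN tag, enforce the minimum buffer size and round
 * up to the Rx buffer alignment. Returns -EINVAL for an MTU beyond the
 * jumbo packet limit.
 */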
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
		return -EINVAL;
	}

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
		rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}

static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->enable_int(channel,
					  XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->enable_int(channel,
					  XGMAC_INT_DMA_CH_SR_RI);
	}
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (channel->tx_ring)
			hw_if->disable_int(channel,
					   XGMAC_INT_DMA_CH_SR_TI);
		if (channel->rx_ring)
			hw_if->disable_int(channel,
					   XGMAC_INT_DMA_CH_SR_RI);
	}
}

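/* Device interrupt handler. Per-channel Tx/Rx work is deferred to NAPI
 * (Tx/Rx interrupts stay disabled until the poll completes), a fatal bus
 * error schedules the restart work, and MAC MMC counter interrupts are
 * serviced immediately.
 */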
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	DBGPR("-->xgbe_isr\n");

	DBGPR("  DMA_ISR = %08x\n", dma_isr);
	DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
	DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule(&pdata->napi);
			}
		}

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear all interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);
	}

	DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

	DBGPR("<--xgbe_isr\n");

isr_done:
	return IRQ_HANDLED;
}

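/* Tx coalescing timer callback: schedule NAPI so that completed Tx
 * descriptors are cleaned even if no further device interrupt arrives.
 */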
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
	struct xgbe_channel *channel = container_of(timer,
						    struct xgbe_channel,
						    tx_timer);
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned long flags;

	DBGPR("-->xgbe_tx_timer\n");

	spin_lock_irqsave(&ring->lock, flags);

	if (napi_schedule_prep(&pdata->napi)) {
		/* Disable Tx and Rx interrupts */
		xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(&pdata->napi);
	}

	channel->tx_timer_active = 0;

	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_tx_timer\n");

	return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_init_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s adding tx timer\n", channel->name);
		hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		channel->tx_timer.function = xgbe_tx_timer;
	}

	DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_stop_tx_timers\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		DBGPR("  %s deleting tx timer\n", channel->name);
		channel->tx_timer_active = 0;
		hrtimer_cancel(&channel->tx_timer);
	}

	DBGPR("<--xgbe_stop_tx_timers\n");
}

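/* Read the MAC hardware feature registers and cache the capabilities
 * reported by this instance of the device.
 */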
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	/* Hardware feature register 0 */
	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					   ADDMACADRSEL);
	hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       RXFIFOSIZE);
	hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
					       TXFIFOSIZE);
	hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* The Queue and Channel counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;

	DBGPR("<--xgbe_get_all_hw_features\n");
}

static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	if (add)
		netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
			       NAPI_POLL_WEIGHT);
	napi_enable(&pdata->napi);
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
{
	napi_disable(&pdata->napi);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_skbuff\n");
}

static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_skbuff\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_skb(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_skbuff\n");
}

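/* Power management entry points, shared by the ioctl and driver
 * (suspend/resume) paths; the caller argument identifies the context.
 */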
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	phy_stop(pdata->phydev);

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	/* Powerdown Tx/Rx */
	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	phy_start(pdata->phydev);

	/* Enable Tx/Rx */
	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	xgbe_napi_enable(pdata, 0);
	netif_tx_start_all_queues(netdev);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_start\n");

	xgbe_set_rx_mode(netdev);

	hw_if->init(pdata);

	phy_start(pdata->phydev);

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	xgbe_init_tx_timers(pdata);

	xgbe_napi_enable(pdata, 1);
	netif_tx_start_all_queues(netdev);

	DBGPR("<--xgbe_start\n");

	return 0;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct net_device *netdev = pdata->netdev;

	DBGPR("-->xgbe_stop\n");

	phy_stop(pdata->phydev);

	netif_tx_stop_all_queues(netdev);
	xgbe_napi_disable(pdata);

	xgbe_stop_tx_timers(pdata);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);
	synchronize_irq(pdata->irq_number);

	xgbe_free_tx_skbuff(pdata);
	xgbe_free_rx_skbuff(pdata);

	/* Issue software reset to device if requested */
	if (reset)
		hw_if->exit(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata, 1);

	rtnl_unlock();
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (vlan_tx_tag_present(skb))
		packet->vlan_ctag = vlan_tx_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}

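/* Pre-scan an skb to count the descriptors it will need: an optional
 * context descriptor when the TSO MSS or VLAN tag changes, an extra
 * descriptor for the TSO header, and one descriptor per
 * XGBE_TX_MAX_BUF_SIZE chunk of the linear data and of each fragment.
 * The Tx attributes are recorded in the packet data along the way.
 */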
static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	context_desc = 0;
	packet->rdesc_count = 0;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (vlan_tx_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}

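/* net_device open/close: manage the device clock, the ring descriptor
 * resources and the device interrupt, and start/stop the hardware.
 */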
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Enable the clock */
	ret = clk_prepare_enable(pdata->sysclock);
	if (ret) {
		netdev_alert(netdev, "clk_prepare_enable failed\n");
		return ret;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_clk;
	pdata->rx_buf_size = ret;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_clk;

	/* Initialize the device restart work struct */
	INIT_WORK(&pdata->restart_work, xgbe_restart);

	/* Request interrupts */
	ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->irq_number);
		goto err_irq;
	}
	pdata->irq_number = netdev->irq;

	ret = xgbe_start(pdata);
	if (ret)
		goto err_start;

	DBGPR("<--xgbe_open\n");

	return 0;

err_start:
	hw_if->exit(pdata);

	devm_free_irq(pdata->dev, pdata->irq_number, pdata);
	pdata->irq_number = 0;

err_irq:
	desc_if->free_ring_resources(pdata);

err_clk:
	clk_disable_unprepare(pdata->sysclock);

	return ret;
}

static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Issue software reset to device */
	hw_if->exit(pdata);

	/* Free all the ring data */
	desc_if->free_ring_resources(pdata);

	/* Release the interrupt */
	if (pdata->irq_number != 0) {
		devm_free_irq(pdata->dev, pdata->irq_number, pdata);
		pdata->irq_number = 0;
	}

	/* Disable the clock */
	clk_disable_unprepare(pdata->sysclock);

	DBGPR("<--xgbe_close\n");

	return 0;
}

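/* ndo_start_xmit handler: size the request via xgbe_packet_info(), stop
 * the subqueue and return NETDEV_TX_BUSY when too few descriptors are
 * free, otherwise prepare the TSO/VLAN state, map the skb and hand the
 * descriptors to the hardware.
 */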
static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	unsigned long flags;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	spin_lock_irqsave(&ring->lock, flags);

	if (skb->len == 0) {
		netdev_err(netdev, "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(ring, skb, packet);

	/* Check that there are enough descriptors available */
	if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
		DBGPR("  Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;
		ret = NETDEV_TX_BUSY;
		goto tx_netdev_return;
	}

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netdev_err(netdev, "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Configure required descriptor fields for transmission */
	hw_if->pre_xmit(channel);

#ifdef XGMAC_ENABLE_TX_PKT_DUMP
	xgbe_print_pkt(netdev, skb, true);
#endif

tx_netdev_return:
	spin_unlock_irqrestore(&ring->lock, flags);

	DBGPR("<--xgbe_xmit\n");

	return ret;
}

static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int pr_mode, am_mode;

	DBGPR("-->xgbe_set_rx_mode\n");

	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

	hw_if->set_promiscuous_mode(pdata, pr_mode);
	hw_if->set_all_multicast_mode(pdata, am_mode);

	hw_if->add_mac_addresses(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}

static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}

static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata, 0);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}

static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}

static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_poll_controller\n");

	disable_irq(pdata->irq_number);

	xgbe_isr(pdata->irq_number, pdata);

	enable_irq(pdata->irq_number);

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */

static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned int rxcsum, rxvlan, rxvlan_filter;

	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}

static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open = xgbe_open,
	.ndo_stop = xgbe_close,
	.ndo_start_xmit = xgbe_xmit,
	.ndo_set_rx_mode = xgbe_set_rx_mode,
	.ndo_set_mac_address = xgbe_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = xgbe_change_mtu,
	.ndo_get_stats64 = xgbe_get_stats64,
	.ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xgbe_poll_controller,
#endif
	.ndo_set_features = xgbe_set_features,
};

struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return (struct net_device_ops *)&xgbe_netdev_ops;
}

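/* Reclaim completed Tx descriptors on a channel, freeing the associated
 * skbs, and wake the subqueue once enough descriptors are free again.
 */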
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	unsigned long flags;
	int processed = 0;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	spin_lock_irqsave(&ring->lock, flags);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty < ring->cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
		xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
#endif

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_skb(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_wake_subqueue(netdev, channel->queue_index);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	spin_unlock_irqrestore(&ring->lock, flags);

	return processed;
}

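/* Rx processing within the NAPI budget: read completed descriptors,
 * reassemble multi-descriptor packets, apply checksum/VLAN offload
 * results and pass the skbs to the stack via napi_gro_receive().
 */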
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct sk_buff *skb;
	unsigned int incomplete, error;
	unsigned int cur_len, put_len, max_len;
	int received = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	packet = &ring->packet_data;
	while (received < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* Clear the packet data information */
		memset(packet, 0, sizeof(*packet));
		skb = NULL;
		error = 0;
		cur_len = 0;

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;
		ring->dirty++;

		dma_unmap_single(pdata->dev, rdata->skb_dma,
				 rdata->skb_dma_len, DMA_FROM_DEVICE);
		rdata->skb_dma = 0;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);

		/* Earlier error, just drain the remaining data */
		if (incomplete && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				DBGPR("Error in received packet\n");
			dev_kfree_skb(skb);
			continue;
		}

		put_len = rdata->len - cur_len;
		if (skb) {
			if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
				DBGPR("pskb_expand_head error\n");
				if (incomplete) {
					error = 1;
					goto read_again;
				}

				dev_kfree_skb(skb);
				continue;
			}
			memcpy(skb_tail_pointer(skb), rdata->skb->data,
			       put_len);
		} else {
			skb = rdata->skb;
			rdata->skb = NULL;
		}
		skb_put(skb, put_len);
		cur_len += put_len;

		if (incomplete)
			goto read_again;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			DBGPR("packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			continue;
		}

#ifdef XGMAC_ENABLE_RX_PKT_DUMP
		xgbe_print_pkt(netdev, skb, false);
#endif

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, &pdata->napi);

		netdev->last_rx = jiffies;
		napi_gro_receive(&pdata->napi, skb);
	}

	if (received) {
		desc_if->realloc_skb(channel);

		/* Update the Rx Tail Pointer Register with address of
		 * the last cleaned entry */
		rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
				  lower_32_bits(rdata->rdesc_dma));
	}

	DBGPR("<--xgbe_rx_poll: received = %d\n", received);

	return received;
}

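/* NAPI poll callback: clean the Tx rings first, then process Rx up to
 * the budget; if the budget was not exhausted, complete NAPI and
 * re-enable the Tx/Rx interrupts.
 */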
static int xgbe_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int processed;
	unsigned int i;

	DBGPR("-->xgbe_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = 0;
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		processed += xgbe_rx_poll(channel, budget - processed);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_poll: received = %d\n", processed);

	return processed;
}

void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
		       unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
		      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
		      le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		      le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
		       unsigned int idx)
{
	DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
	      le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
	      le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}

void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_alert(netdev, "\n************** SKB dump ****************\n");

	netdev_alert(netdev, "%s packet of %d bytes\n",
		     (tx_rx ? "TX" : "RX"), skb->len);

	netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32)
		netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);

	netdev_alert(netdev, "\n************** SKB dump ****************\n");
}