/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = clk_get_rate(pdata->sysclock);

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = clk_get_rate(pdata->sysclock);

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}
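
/* Worked example of the two conversions above (illustrative only, not part
 * of the driver): assuming a hypothetical 125 MHz system clock,
 * rate / 1000000 = 125 ticks per usec, so a 100 usec coalescing target
 * becomes
 *
 *   riwt = (100 * 125) / 256 = 48   (12288 cycles, roughly 98.3 usec)
 *
 * and converting riwt = 48 back gives (48 * 256) / 125 = 98 usec. The
 * round trip is lossy because the watchdog counts in 256-cycle units.
 */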

static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
				       pdata->pblx8);

	return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
				       pdata->tx_pbl);
	}

	return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
				       pdata->rx_pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
				       pdata->rx_riwt);
	}

	return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
				       pdata->rx_buf_size);
	}
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
	}
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	if (pdata->tx_pause)
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	if (pdata->rx_pause)
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		/* Clear all the interrupts which are set */
		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless polling)
			 */
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable
			 */
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
			XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
	}
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	/* No MAC interrupts to be enabled */
	XGMAC_IOWRITE(pdata, MAC_IER, 0);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
}

static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

	return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

	return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

	return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
				     unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return 0;

	DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
				       unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return 0;

	DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
			     struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (ha) {
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = ha->addr[0];
		mac_addr[1] = ha->addr[1];
		mac_addr[2] = ha->addr[2];
		mac_addr[3] = ha->addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = ha->addr[4];
		mac_addr[1] = ha->addr[5];

		DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
		      *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	if (netdev_uc_count(netdev) > addn_macs) {
		xgbe_set_promiscuous_mode(pdata, 1);
	} else {
		netdev_for_each_uc_addr(ha, netdev) {
			xgbe_set_mac_reg(pdata, ha, &mac_reg);
			addn_macs--;
		}

		if (netdev_mc_count(netdev) > addn_macs) {
			xgbe_set_all_multicast_mode(pdata, 1);
		} else {
			netdev_for_each_mc_addr(ha, netdev) {
				xgbe_set_mac_reg(pdata, ha, &mac_reg);
				addn_macs--;
			}
		}
	}

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct netdev_hw_addr *ha;
	unsigned int hash_reg;
	unsigned int hash_table_shift, hash_table_count;
	u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
	u32 crc;
	unsigned int i;

	hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
	hash_table_count = pdata->hw_feat.hash_table_size / 32;
	memset(hash_table, 0, sizeof(hash_table));

	/* Build the MAC Hash Table register values */
	netdev_for_each_uc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	netdev_for_each_mc_addr(ha, netdev) {
		crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
		crc >>= hash_table_shift;
		hash_table[crc >> 5] |= (1 << (crc & 0x1f));
	}

	/* Set the MAC Hash Table registers */
	hash_reg = MAC_HTR0;
	for (i = 0; i < hash_table_count; i++) {
		XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
		hash_reg += MAC_HTR_INC;
	}
}
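
/* Worked example of the hash indexing above (illustrative, not driver
 * code): with a 256-entry hash table, hash_table_shift = 26 - (256 >> 7)
 * = 24, leaving the top 8 bits of the CRC as the hash value, and
 * hash_table_count = 256 / 32 = 8 registers.  A hash of, say, 0x53
 * selects word hash_table[0x53 >> 5] = hash_table[2] and bit
 * (0x53 & 0x1f) = 19 within it, i.e. bit 19 of MAC_HTR2.
 */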

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	if (pdata->hw_feat.hash_table_size)
		xgbe_set_mac_hash_table(pdata);
	else
		xgbe_set_mac_addn_addrs(pdata);

	return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | (addr[0] << 0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return 0;
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
			      int mmd_reg)
{
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	mutex_lock(&pdata->xpcs_mutex);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
	mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
				int mmd_reg, int mmd_data)
{
	unsigned int mmd_address;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	mutex_lock(&pdata->xpcs_mutex);
	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
	XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	mutex_unlock(&pdata->xpcs_mutex);
}
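
/* Worked example of the two-phase access above (illustrative only): for
 * an mmd_address of 0x10007 (MMD 1, register 7), the address phase writes
 * mmd_address >> 8 = 0x100 to the PCS_MMD_SELECT window register (offset
 * shifted left 2 bits for the 32-bit mmio bus), and the data phase then
 * reads or writes offset (0x10007 & 0xff) << 2 = 0x1c within the selected
 * 256-register window.
 */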

static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	return 0;
}

#ifndef CRCPOLY_LE
#define CRCPOLY_LE 0xedb88320
#endif
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = CRCPOLY_LE;
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}

	return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	u32 crc;
	u16 vid;
	__le16 vid_le;
	u16 vlan_hash_table = 0;

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	return 0;
}
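
/* Worked example (illustrative): the 16-bit VLAN hash uses only the top
 * 4 bits of the bit-reversed, inverted CRC32 of the little-endian VID.
 * If that computation yields crc = 0x5 for some VID, the update sets
 * vlan_hash_table |= (1 << 5), so bit 5 of MAC_VLANHTR.VLHT then admits
 * every VID that hashes to 0x5; hash collisions pass the hardware filter
 * and are left to software to sort out.
 */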

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	int i;
	int start_index = ring->cur;

	DBGPR("-->tx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);
		rdesc = rdata->rdesc;

		/* Initialize Tx descriptor
		 *   Set buffer 1 (lo) address to zero
		 *   Set buffer 1 (hi) address to zero
		 *   Reset all other control bits (IC, TTSE, B2L & B1L)
		 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
		 *   etc)
		 */
		rdesc->desc0 = 0;
		rdesc->desc1 = 0;
		rdesc->desc2 = 0;
		rdesc->desc3 = 0;
	}

	/* Make sure everything is written to the descriptor(s) before
	 * telling the device about them
	 */
	wmb();

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to dma address (lo)
	 *   Set buffer 1 (hi) address to dma address (hi)
	 *   Set buffer 2 (lo) address to zero
	 *   Set buffer 2 (hi) address to zero and set control bits
	 *   OWN and INTE
	 */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
	rdesc->desc2 = 0;

	rdesc->desc3 = 0;
	if (rdata->interrupt)
		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	wmb();
}

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	unsigned int start_index = ring->cur;
	unsigned int rx_coalesce, rx_frames;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
	rx_frames = pdata->rx_frames;

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);
		rdesc = rdata->rdesc;

		/* Initialize Rx descriptor
		 *   Set buffer 1 (lo) address to dma address (lo)
		 *   Set buffer 1 (hi) address to dma address (hi)
		 *   Set buffer 2 (lo) address to zero
		 *   Set buffer 2 (hi) address to zero and set control
		 *   bits OWN and INTE appropriately
		 */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
		rdesc->desc2 = 0;
		rdesc->desc3 = 0;
		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
		rdata->interrupt = 1;
		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
			/* Clear interrupt on completion bit */
			XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
					  0);
			rdata->interrupt = 0;
		}
	}

	/* Make sure everything is written to the descriptors before
	 * telling the device about them
	 */
	wmb();

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--rx_desc_init\n");
}

static void xgbe_pre_xmit(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int csum, tso, vlan;
	unsigned int tso_context, vlan_context;
	unsigned int tx_coalesce, tx_frames;
	int start_index = ring->cur;
	int i;

	DBGPR("-->xgbe_pre_xmit\n");

	csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      CSUM_ENABLE);
	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	if (tso && (packet->mss != ring->tx.cur_mss))
		tso_context = 1;
	else
		tso_context = 0;

	if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
		vlan_context = 1;
	else
		vlan_context = 0;

	tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
	tx_frames = pdata->tx_frames;
	if (tx_coalesce && !channel->tx_timer_active)
		ring->coalesce_count = 0;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Create a context descriptor if this is a TSO packet */
	if (tso_context || vlan_context) {
		if (tso_context) {
			DBGPR("  TSO context descriptor, mss=%u\n",
			      packet->mss);

			/* Set the MSS size */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
					  MSS, packet->mss);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Indicate this descriptor contains the MSS */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  TCMSSV, 1);

			ring->tx.cur_mss = packet->mss;
		}

		if (vlan_context) {
			DBGPR("  VLAN context descriptor, ctag=%u\n",
			      packet->vlan_ctag);

			/* Mark it as a CONTEXT descriptor */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  CTXT, 1);

			/* Set the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VT, packet->vlan_ctag);

			/* Indicate this descriptor contains the VLAN tag */
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
					  VLTV, 1);

			ring->tx.cur_vlan_ctag = packet->vlan_ctag;
		}

		ring->cur++;
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdesc = rdata->rdesc;
	}

	/* Update buffer address (for TSO this is the header) */
	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

	/* Update the buffer length */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
			  rdata->skb_dma_len);

	/* VLAN tag insertion check */
	if (vlan)
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
				  TX_NORMAL_DESC2_VLAN_INSERT);

	/* Set IC bit based on Tx coalescing settings */
	XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
	if (tx_coalesce && (!tx_frames ||
			    (++ring->coalesce_count % tx_frames)))
		/* Clear IC bit */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);

	/* Mark it as First Descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

	/* Mark it as a NORMAL descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

	/* Set OWN bit if not the first descriptor */
	if (ring->cur != start_index)
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

	if (tso) {
		/* Enable TSO */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
				  packet->tcp_payload_len);
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
				  packet->tcp_header_len / 4);
	} else {
		/* Enable CRC and Pad Insertion */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);

		/* Set the total length to be transmitted */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
				  packet->length);
	}

	for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
		ring->cur++;
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdesc = rdata->rdesc;

		/* Update buffer address */
		rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
		rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

		/* Update the buffer length */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
				  rdata->skb_dma_len);

		/* Set IC bit based on Tx coalescing settings */
		XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
		if (tx_coalesce && (!tx_frames ||
				    (++ring->coalesce_count % tx_frames)))
			/* Clear IC bit */
			XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);

		/* Set OWN bit */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

		/* Mark it as NORMAL descriptor */
		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

		/* Enable HW CSUM */
		if (csum)
			XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
					  CIC, 0x3);
	}

	/* Set LAST bit for the last descriptor */
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);

	/* In case the Tx DMA engine is running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the first descriptor
	 */
	wmb();

	/* Set OWN bit for the first descriptor */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	rdesc = rdata->rdesc;
	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

#ifdef XGMAC_ENABLE_TX_DESC_DUMP
	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
#endif

	/* Make sure ownership is written to the descriptor */
	wmb();

	/* Issue a poll command to Tx DMA by writing address
	 * of next immediate free descriptor */
	ring->cur++;
	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Start the Tx coalescing timer */
	if (tx_coalesce && !channel->tx_timer_active) {
		channel->tx_timer_active = 1;
		hrtimer_start(&channel->tx_timer,
			      ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
			      HRTIMER_MODE_REL);
	}

	DBGPR("  %s: descriptors %u to %u written\n",
	      channel->name, start_index & (ring->rdesc_count - 1),
	      (ring->cur - 1) & (ring->rdesc_count - 1));

	DBGPR("<--xgbe_pre_xmit\n");
}

static int xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	struct net_device *netdev = channel->pdata->netdev;
	unsigned int err, etlt;

	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return 1;

#ifdef XGMAC_ENABLE_RX_DESC_DUMP
	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
#endif

	/* Get the packet length */
	rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
		/* Not all the data has been transferred for this packet */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       INCOMPLETE, 1);
		return 0;
	}

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		       INCOMPLETE, 0);

	/* Set checksum done indicator as appropriate */
	if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			       CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	DBGPR("  err=%u, etlt=%#x\n", err, etlt);

	if (!err || (err && !etlt)) {
		if ((etlt == 0x09) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
							      RX_NORMAL_DESC0,
							      OVT);
			DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
		}
	} else {
		if ((etlt == 0x05) || (etlt == 0x06))
			XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				       CSUM_DONE, 0);
		else
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
				       FRAME, 1);
	}

	DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
	      ring->cur & (ring->rdesc_count - 1), ring->cur);

	return 0;
}

static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
}

static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
}

static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
				       enum xgbe_int_state int_state)
{
	unsigned int dma_ch_ier;

	if (int_state == XGMAC_INT_STATE_SAVE) {
		channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
		channel->saved_ier &= XGBE_DMA_INTERRUPT_MASK;
	} else {
		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
		dma_ch_ier |= channel->saved_ier;
		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
	}
}

static int xgbe_enable_int(struct xgbe_channel *channel,
			   enum xgbe_int int_id)
{
	switch (int_id) {
	case XGMAC_INT_DMA_ISR_DC0IS:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
		break;
	default:
		return -1;
	}

	return 0;
}

static int xgbe_disable_int(struct xgbe_channel *channel,
			    enum xgbe_int int_id)
{
	unsigned int dma_ch_ier;

	switch (int_id) {
	case XGMAC_INT_DMA_ISR_DC0IS:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);

		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
		break;
	default:
		return -1;
	}

	return 0;
}

static int xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	DBGPR("-->xgbe_exit\n");

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	usleep_range(10, 15);

	/* Poll Until Poll Condition */
	while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		usleep_range(500, 600);

	if (!count)
		return -EBUSY;

	DBGPR("<--xgbe_exit\n");

	return 0;
}

static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
		count = 2000;
		while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	/* Set enhanced addressing mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
}

static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	unsigned int arcache, awcache;

	arcache = 0;
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, XGBE_DMA_ARCACHE);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, XGBE_DMA_ARDOMAIN);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, XGBE_DMA_ARCACHE);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, XGBE_DMA_ARDOMAIN);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, XGBE_DMA_ARCACHE);
	XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, XGBE_DMA_ARDOMAIN);
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, XGBE_DMA_AWCACHE);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, XGBE_DMA_AWDOMAIN);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, XGBE_DMA_AWCACHE);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, XGBE_DMA_AWDOMAIN);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, XGBE_DMA_AWCACHE);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, XGBE_DMA_AWDOMAIN);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, XGBE_DMA_AWCACHE);
	XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, XGBE_DMA_AWDOMAIN);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
}

static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm (when
	 * traffic class is using ETS algorithm)
	 */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to strict priority algorithm */
	for (i = 0; i < XGBE_TC_CNT; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
						  unsigned char queue_count)
{
	unsigned int q_fifo_size = 0;
	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;

	/* Calculate Tx/Rx fifo share per queue */
	switch (fifo_size) {
	case 0:
		q_fifo_size = XGBE_FIFO_SIZE_B(128);
		break;
	case 1:
		q_fifo_size = XGBE_FIFO_SIZE_B(256);
		break;
	case 2:
		q_fifo_size = XGBE_FIFO_SIZE_B(512);
		break;
	case 3:
		q_fifo_size = XGBE_FIFO_SIZE_KB(1);
		break;
	case 4:
		q_fifo_size = XGBE_FIFO_SIZE_KB(2);
		break;
	case 5:
		q_fifo_size = XGBE_FIFO_SIZE_KB(4);
		break;
	case 6:
		q_fifo_size = XGBE_FIFO_SIZE_KB(8);
		break;
	case 7:
		q_fifo_size = XGBE_FIFO_SIZE_KB(16);
		break;
	case 8:
		q_fifo_size = XGBE_FIFO_SIZE_KB(32);
		break;
	case 9:
		q_fifo_size = XGBE_FIFO_SIZE_KB(64);
		break;
	case 10:
		q_fifo_size = XGBE_FIFO_SIZE_KB(128);
		break;
	case 11:
		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
		break;
	}
	q_fifo_size = q_fifo_size / queue_count;

	/* Set the queue fifo size programmable value */
	if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
		p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
		p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
		p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
		p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
		p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
		p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
		p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
		p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
		p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
		p_fifo = XGMAC_MTL_FIFO_SIZE_512;
	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
		p_fifo = XGMAC_MTL_FIFO_SIZE_256;

	return p_fifo;
}
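
/* Worked example (illustrative): a hardware fifo_size encoding of 7
 * selects XGBE_FIFO_SIZE_KB(16).  Split across, say, four queues, that
 * leaves 4 KB per queue, and the ladder above maps 4 KB to
 * XGMAC_MTL_FIFO_SIZE_4K.  The TQS/RQS fields encode the fifo size in
 * 256-byte units minus one, which is why the callers below report
 * (fifo_size + 1) * 256 bytes per queue.
 */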

static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	enum xgbe_mtl_fifo_size fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
						  pdata->hw_feat.tx_q_cnt);

	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);

	netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
		      pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
}

static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	enum xgbe_mtl_fifo_size fifo_size;
	unsigned int i;

	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
						  pdata->hw_feat.rx_q_cnt);

	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);

	netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
		      pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
}

static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int i, reg, reg_val;
	unsigned int q_count = pdata->hw_feat.rx_q_cnt;

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}
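
/* Worked example (illustrative): MTL_RQDCM_Q_PER_REG queues share one
 * mapping register, one byte each.  Assuming four queues per register,
 * queue i sets bit 7 (the 0x80 dynamic-mapping flag) of byte (i % 4), so
 * four queues accumulate reg_val = 0x80808080 in MTL_RQDCM0R before the
 * loop above writes it out and advances to the next register.
 */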
1624
1625static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
1626{
1627 unsigned int i;
1628
1629 for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
1630 /* Activate flow control when less than 4k left in fifo */
1631 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
1632
1633 /* De-activate flow control when more than 6k left in fifo */
1634 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
1635 }
1636}

static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}

static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if (pdata->netdev->features & NETIF_F_RXCSUM)
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		xgbe_enable_rx_vlan_filtering(pdata);
	else
		xgbe_disable_rx_vlan_filtering(pdata);

	if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		xgbe_enable_rx_vlan_stripping(pdata);
	else
		xgbe_disable_rx_vlan_stripping(pdata);
}
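
/*
 * With VLTI set, the MAC takes the outgoing VLAN tag from a Tx context
 * descriptor rather than from MAC_VLANIR itself, which is what allows
 * a per-skb tag to be inserted on transmit; CSVL = 0 selects the
 * C-VLAN (0x8100) tag type rather than S-VLAN.
 */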

static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
			XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

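	/* The good-or-bad (_GB) broadcast count is folded into the "good"
	 * counter below; the stats structure carries no separate
	 * txbroadcastframes_gb field.
	 */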
	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
			XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
			XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
			XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
			XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
			XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
			XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
			XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
			XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
			XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
			XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
			XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
			XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
			XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
			XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
			XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
			XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
			XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
}
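
/*
 * Most MMC counters are LO/HI register pairs, of which only the LO
 * half is read here; the runt, jabber, undersize, oversize and
 * watchdog error counters appear in the register map as single 32-bit
 * counters, which is why their defines carry no _LO suffix.
 */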

static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
		XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
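
/*
 * Because ROR is set here, each read of an MMC counter register
 * returns the count accumulated since the previous read and then
 * clears it, which is why xgbe_tx_mmc_int(), xgbe_rx_mmc_int() and
 * xgbe_read_mmc_stats() all add (+=) the register values into the
 * software totals instead of overwriting them.
 */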

static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
				       MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
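
/*
 * Note the symmetry with xgbe_disable_tx() below: the Tx path is
 * enabled from DMA channel to MTL queue to MAC, and disabled in the
 * reverse order.
 */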

static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
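
/*
 * MAC_RQC0R packing in xgbe_enable_rx() above: each Rx queue owns a
 * 2-bit field and 0x02 is the "enabled for DCB/generic" encoding on
 * DWC XGMAC, so four queues produce a register value of 0xAA;
 * xgbe_disable_rx() clears all of the fields with a single write of 0.
 */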

static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}

static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
	}
}

static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Enable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
	}
}

static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	/* Disable each Rx DMA channel */
	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
	}
}
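
/*
 * The powerup/powerdown helpers above are the lightweight counterparts
 * of enable/disable used on the power-management path: they toggle
 * only the DMA channels (plus the MAC transmitter on the Tx side) and
 * leave the MTL queue and MAC Rx configuration untouched.
 */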

static int xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_init\n");

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pblx8(pdata);
	xgbe_config_tx_pbl_val(pdata);
	xgbe_config_rx_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_rx_queue_mapping(pdata);
	/* TODO: Program the priorities mapped to the Selected Traffic
	 * Classes in the MTL_TC_Prty_Map0-3 registers
	 */
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	xgbe_config_flow_control_threshold(pdata);
	/* TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
	/* TODO: Error Packet and undersized good Packet forwarding enable
	 * (FEP and FUP)
	 */
	xgbe_enable_mtl_interrupts(pdata);

	/* Transmit Class Weight */
	XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	DBGPR("<--xgbe_init\n");

	return 0;
}

void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	DBGPR("-->xgbe_init_function_ptrs\n");

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
	hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
	hw_if->add_mac_addresses = xgbe_add_mac_addresses;
	hw_if->set_mac_address = xgbe_set_mac_address;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_gmii_speed = xgbe_set_gmii_speed;
	hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
	hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->pre_xmit = xgbe_pre_xmit;
	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->rx_desc_reset = xgbe_rx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;

	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For RX and TX PBL config */
	hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
	hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
	hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
	hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
	hw_if->config_pblx8 = xgbe_config_pblx8;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	DBGPR("<--xgbe_init_function_ptrs\n");
}