/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"

static int octnet_get_link_stats(struct net_device *netdev);

struct oct_mdio_cmd_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct oct_mdio_cmd_resp {
	u64 rh;
	struct oct_mdio_cmd resp;
	u64 status;
};

#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))

/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};

#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
#define OCT_ETHTOOL_REGSVER 1

/* statistics of PF */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",	/*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
			 *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};

/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",	/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",	/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"vxlan",
	"txq_restart",
};

/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",	/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",	/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",	/*oct->droq[oq_no]->stats.rx_dropped+
			 *oct->droq[oq_no]->stats.dropped_nodispatch+
			 *oct->droq[oq_no]->stats.dropped_toomany+
			 *oct->droq[oq_no]->stats.dropped_nomem
			 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"vxlan",
	"buffer_alloc_failure",
};

#define OCTNIC_NCMD_AUTONEG_ON 0x1
#define OCTNIC_NCMD_PHY_ON 0x2

static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;

	linfo = &lio->linfo;

	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
		ecmd->port = PORT_FIBRE;
		ecmd->supported =
			(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
			 SUPPORTED_Pause);
		ecmd->advertising =
			(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
		ecmd->transceiver = XCVR_EXTERNAL;
		ecmd->autoneg = AUTONEG_DISABLE;

	} else {
		dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
			linfo->link.s.if_mode);
	}

	if (linfo->link.s.link_up) {
		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
		ecmd->duplex = linfo->link.s.duplex;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct lio *lio;
	struct octeon_device *oct;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio");
	strcpy(drvinfo->version, LIQUIDIO_VERSION);
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}

static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
}

static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[128];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		      board_info->name, board_info->serial_number,
		      board_info->major, board_info->minor);

	return len;
}

static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	if (eeprom->offset != 0)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len =
		sprintf((char *)bytes,
			"boardname:%s serialnum:%s maj:%lld min:%lld\n",
			board_info->name, board_info->serial_number,
			board_info->major, board_info->minor);

	return 0;
}

static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
	nctrl.ncmd.s.param1 = addr;
	nctrl.ncmd.s.param2 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
		return -EINVAL;
	}

	return 0;
}

/* Callback for when mdio command response arrives
 */
static void octnet_mdio_resp_callback(struct octeon_device *oct,
				      u32 status,
				      void *buf)
{
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;

	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
	} else {
		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
	}
	wake_up_interruptible(&mdio_cmd_ctx->wc);
}

/* This routine provides PHY access routines for
 * MDIO clause 45.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}

static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_BEACON_ADDR,
						     &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_CTRL_ADDR,
						     &value);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);

		} else if (oct->chip_id == OCTEON_CN68XX) {
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	}

	if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
		ering->rx_pending = 0;
		ering->rx_max_pending = 0;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = rx_pending;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = rx_max_pending;
	} else {
		ering->rx_pending = rx_pending;
		ering->rx_max_pending = rx_max_pending;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = 0;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = 0;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
}

static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE, 0);
	}

	lio->msg_enable = msglvl;
}

static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers. Just report pause frame support.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	pause->autoneg = 0;

	pause->tx_pause = oct->tx_pause;
	pause->rx_pause = oct->rx_pause;
}

static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/*sum of oct->droq[oq_no]->stats->rx_dropped +
	 *oct->droq[oq_no]->stats->dropped_nodispatch +
	 *oct->droq[oq_no]->stats->dropped_toomany +
	 *oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/*data[i++] = CVM_CAST64(stats->multicast); */
	/*data[i++] = CVM_CAST64(stats->collisions); */

	/* firmware tx stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 *fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* mac tx statistics */
	/*CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/*CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/*CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/*CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/*CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/*CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_timer
	 */
	/* intrmod: packet forward rate */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/*CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/*CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/*CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/*lio->link_changes*/
	data[i++] = CVM_CAST64(lio->link_changes);

	/* TX -- lio_update_stats(lio); */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & (1ULL << j)))
			continue;
		/*packets to network port*/
		/*# of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/*# of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/*# of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/*# of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/*XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/*instruction to firmware: data and control */
		/*# of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/*# of instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/*# of instructions could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/*bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/*tso request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/*vxlan request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/*txq restart*/
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	/* for (j = 0; j < oct_dev->num_oqs; j++) { */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & (1ULL << j)))
			continue;

		/*packets sent to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & (1ULL << i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		/* for (i = 0; i < oct_dev->num_oqs; i++) { */
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & (1ULL << i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	default:
		return -EOPNOTSUPP;
	}
}

static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}

		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	if (intrmod_cfg->rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	return 0;
}

/* Callback function for intrmod */
static void octnet_intrmod_callback(struct octeon_device *oct_dev,
				    u32 status,
				    void *ptr)
{
	struct oct_intrmod_cmd *cmd = ptr;
	struct octeon_soft_command *sc = cmd->sc;

	oct_dev = cmd->oct_dev;

	if (status)
		dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
			CVM_CAST64(status));
	else
		dev_info(&oct_dev->pci_dev->dev,
			 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
			 oct_dev->intrmod.rx_enable);

	octeon_free_soft_command(oct_dev, sc);
}

/* Configure interrupt moderation parameters */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cmd *cmd;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_cmd));

	if (!sc)
		return -ENOMEM;

	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
	cmd->sc = sc;
	cmd->cfg = cfg;
	cmd->oct_dev = oct_dev;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = cmd;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	return 0;
}

static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
		sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
		sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;

	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}

/* Retrieve link-level and firmware statistics from the NIC */
static int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500;	/* in milliseconds */
1196
1197 retval = octeon_send_soft_command(oct_dev, sc);
1198 if (retval == IQ_SEND_FAILED) {
1199 octeon_free_soft_command(oct_dev, sc);
1200 return -EINVAL;
1201 }
1202
1203 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1204
1205 if (resp->status != 1) {
1206 octeon_free_soft_command(oct_dev, sc);
1207
1208 return -EINVAL;
1209 }
1210
1211 octeon_free_soft_command(oct_dev, sc);
1212
1213 return 0;
1214}
1215
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001216/* Enable/Disable auto interrupt Moderation */
1217static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001218 *intr_coal)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001219{
1220 int ret = 0;
1221 struct octeon_device *oct = lio->oct_dev;
1222 struct oct_intrmod_cfg *intrmod_cfg;
1223
1224 intrmod_cfg = &oct->intrmod;
1225
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001226 if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001227 if (intr_coal->rate_sample_interval)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001228 intrmod_cfg->check_intrvl =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001229 intr_coal->rate_sample_interval;
1230 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001231 intrmod_cfg->check_intrvl =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001232 LIO_INTRMOD_CHECK_INTERVAL;
1233
1234 if (intr_coal->pkt_rate_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001235 intrmod_cfg->maxpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001236 intr_coal->pkt_rate_high;
1237 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001238 intrmod_cfg->maxpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001239 LIO_INTRMOD_MAXPKT_RATETHR;
1240
1241 if (intr_coal->pkt_rate_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001242 intrmod_cfg->minpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001243 intr_coal->pkt_rate_low;
1244 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001245 intrmod_cfg->minpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001246 LIO_INTRMOD_MINPKT_RATETHR;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001247 }
1248 if (oct->intrmod.rx_enable) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001249 if (intr_coal->rx_max_coalesced_frames_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001250 intrmod_cfg->rx_maxcnt_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001251 intr_coal->rx_max_coalesced_frames_high;
1252 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001253 intrmod_cfg->rx_maxcnt_trigger =
1254 LIO_INTRMOD_RXMAXCNT_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001255
1256 if (intr_coal->rx_coalesce_usecs_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001257 intrmod_cfg->rx_maxtmr_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001258 intr_coal->rx_coalesce_usecs_high;
1259 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001260 intrmod_cfg->rx_maxtmr_trigger =
1261 LIO_INTRMOD_RXMAXTMR_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001262
1263 if (intr_coal->rx_coalesce_usecs_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001264 intrmod_cfg->rx_mintmr_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001265 intr_coal->rx_coalesce_usecs_low;
1266 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001267 intrmod_cfg->rx_mintmr_trigger =
1268 LIO_INTRMOD_RXMINTMR_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001269
1270 if (intr_coal->rx_max_coalesced_frames_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001271 intrmod_cfg->rx_mincnt_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001272 intr_coal->rx_max_coalesced_frames_low;
1273 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001274 intrmod_cfg->rx_mincnt_trigger =
1275 LIO_INTRMOD_RXMINCNT_TRIGGER;
1276 }
1277 if (oct->intrmod.tx_enable) {
1278 if (intr_coal->tx_max_coalesced_frames_high)
1279 intrmod_cfg->tx_maxcnt_trigger =
1280 intr_coal->tx_max_coalesced_frames_high;
1281 else
1282 intrmod_cfg->tx_maxcnt_trigger =
1283 LIO_INTRMOD_TXMAXCNT_TRIGGER;
1284 if (intr_coal->tx_max_coalesced_frames_low)
1285 intrmod_cfg->tx_mincnt_trigger =
1286 intr_coal->tx_max_coalesced_frames_low;
1287 else
1288 intrmod_cfg->tx_mincnt_trigger =
1289 LIO_INTRMOD_TXMINCNT_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001290 }
1291
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001292 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001293
1294 return ret;
1295}
1296
1297static int
1298oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
1299{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001300 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001301 u32 rx_max_coalesced_frames;
1302
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001303 /* Config Cnt based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001304 switch (oct->chip_id) {
1305 case OCTEON_CN68XX:
1306 case OCTEON_CN66XX: {
1307 struct octeon_cn6xxx *cn6xxx =
1308 (struct octeon_cn6xxx *)oct->chip;
1309
1310 if (!intr_coal->rx_max_coalesced_frames)
1311 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1312 else
1313 rx_max_coalesced_frames =
1314 intr_coal->rx_max_coalesced_frames;
1315 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1316 rx_max_coalesced_frames);
1317 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1318 break;
1319 }
1320 default:
1321 return -EINVAL;
1322 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001323 return 0;
1324}
1325
1326static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
1327 *intr_coal)
1328{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001329 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001330 u32 time_threshold, rx_coalesce_usecs;
1331
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001332 /* Config Time based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001333 switch (oct->chip_id) {
1334 case OCTEON_CN68XX:
1335 case OCTEON_CN66XX: {
1336 struct octeon_cn6xxx *cn6xxx =
1337 (struct octeon_cn6xxx *)oct->chip;
1338 if (!intr_coal->rx_coalesce_usecs)
1339 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1340 else
1341 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001342
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001343 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1344 rx_coalesce_usecs);
1345 octeon_write_csr(oct,
1346 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1347 time_threshold);
1348
1349 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1350 break;
1351 }
1352 default:
1353 return -EINVAL;
1354 }
1355
1356 return 0;
1357}
1358
1359static int
1360oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
1361 __attribute__((unused)))
1362{
1363 struct octeon_device *oct = lio->oct_dev;
1364
1365 /* Config Cnt based interrupt values */
1366 switch (oct->chip_id) {
1367 case OCTEON_CN68XX:
1368 case OCTEON_CN66XX:
1369 break;
1370 default:
1371 return -EINVAL;
1372 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001373 return 0;
1374}
1375
1376static int lio_set_intr_coalesce(struct net_device *netdev,
1377 struct ethtool_coalesce *intr_coal)
1378{
1379 struct lio *lio = GET_LIO(netdev);
1380 int ret;
1381 struct octeon_device *oct = lio->oct_dev;
1382 u32 j, q_no;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001383 int db_max, db_min;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001384
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001385 switch (oct->chip_id) {
1386 case OCTEON_CN68XX:
1387 case OCTEON_CN66XX:
1388 db_min = CN6XXX_DB_MIN;
1389 db_max = CN6XXX_DB_MAX;
1390 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1391 (intr_coal->tx_max_coalesced_frames <= db_max)) {
1392 for (j = 0; j < lio->linfo.num_txpciq; j++) {
1393 q_no = lio->linfo.txpciq[j].s.q_no;
1394 oct->instr_queue[q_no]->fill_threshold =
1395 intr_coal->tx_max_coalesced_frames;
1396 }
1397 } else {
1398 dev_err(&oct->pci_dev->dev,
1399 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1400 intr_coal->tx_max_coalesced_frames, db_min,
1401 db_max);
1402 return -EINVAL;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001403 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001404 break;
1405 default:
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001406 return -EINVAL;
1407 }
1408
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001409 oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1410 oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001411
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001412 ret = oct_cfg_adaptive_intr(lio, intr_coal);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001413
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001414 if (!intr_coal->use_adaptive_rx_coalesce) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001415 ret = oct_cfg_rx_intrtime(lio, intr_coal);
1416 if (ret)
1417 goto ret_intrmod;
1418
1419 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
1420 if (ret)
1421 goto ret_intrmod;
1422 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001423 if (!intr_coal->use_adaptive_tx_coalesce) {
1424 ret = oct_cfg_tx_intrcnt(lio, intr_coal);
1425 if (ret)
1426 goto ret_intrmod;
1427 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001428
1429 return 0;
1430ret_intrmod:
1431 return ret;
1432}
1433
1434static int lio_get_ts_info(struct net_device *netdev,
1435 struct ethtool_ts_info *info)
1436{
1437 struct lio *lio = GET_LIO(netdev);
1438
1439 info->so_timestamping =
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07001440#ifdef PTP_HARDWARE_TIMESTAMPING
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001441 SOF_TIMESTAMPING_TX_HARDWARE |
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001442 SOF_TIMESTAMPING_RX_HARDWARE |
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07001443 SOF_TIMESTAMPING_RAW_HARDWARE |
1444 SOF_TIMESTAMPING_TX_SOFTWARE |
1445#endif
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001446 SOF_TIMESTAMPING_RX_SOFTWARE |
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07001447 SOF_TIMESTAMPING_SOFTWARE;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001448
1449 if (lio->ptp_clock)
1450 info->phc_index = ptp_clock_index(lio->ptp_clock);
1451 else
1452 info->phc_index = -1;
1453
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07001454#ifdef PTP_HARDWARE_TIMESTAMPING
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001455 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1456
1457 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1458 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1459 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1460 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07001461#endif
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001462
1463 return 0;
1464}
1465
static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	/* get the link info */
	linfo = &lio->linfo;

	if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
						  ecmd->speed != SPEED_10) ||
						 (ecmd->duplex != DUPLEX_HALF &&
						  ecmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Ethtool support is not provided for XAUI, RXAUI and XFI interfaces,
	 * as they operate at fixed speed and duplex settings.
	 */
	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
		dev_info(&oct->pci_dev->dev,
			 "Autonegotiation, duplex and speed settings cannot be modified.\n");
		return -EINVAL;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 1000;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* Pass the parameters sent by ethtool (speed, autoneg, duplex) to the
	 * SE core application using ncmd.s.more and the ncmd.s.param fields.
	 */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg ON */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
				    OCTNIC_NCMD_AUTONEG_ON;
		nctrl.ncmd.s.param1 = ecmd->advertising;
	} else {
		/* Autoneg OFF */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;

		nctrl.ncmd.s.param2 = ecmd->duplex;

		nctrl.ncmd.s.param1 = ecmd->speed;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
		return -1;
	}

	return 0;
}

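/* lio_nway_reset - handle "ethtool -r" (restart autonegotiation).
 * If the interface is running, this re-invokes lio_set_settings() with a
 * zeroed ethtool_cmd; the result of that call is not checked and 0 is
 * always returned to ethtool.
 */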
static int lio_nway_reset(struct net_device *netdev)
{
	if (netif_running(netdev)) {
		struct ethtool_cmd ecmd;

		memset(&ecmd, 0, sizeof(struct ethtool_cmd));
		ecmd.autoneg = 0;
		ecmd.speed = 0;
		ecmd.duplex = 0;
		lio_set_settings(netdev, &ecmd);
	}
	return 0;
}

/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
{
	return OCT_ETHTOOL_REGDUMP_LEN;
}

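/* cn6xxx_read_csr_reg - format the CN66XX/CN68XX CSRs into the dump buffer.
 * Prints the PCI window, interrupt, output-queue, input-queue, DMA and
 * BAR1 index registers as "[offset] (NAME): value" lines and returns the
 * number of characters written, so the caller can append further sections.
 */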
static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
{
	u32 reg;
	int i, len = 0;

	/* PCI Window Registers */

	len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
	reg = CN6XXX_WIN_WR_ADDR_LO;
	len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
		       CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
		       CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_LO;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
		       CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_RD_ADDR_HI;
	len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
		       CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_LO;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
		       CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
	reg = CN6XXX_WIN_WR_DATA_HI;
	len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
		       CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
	len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
		       CN6XXX_WIN_WR_MASK_REG,
		       octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));

	/* PCI Interrupt Registers */
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT0,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT0));
	len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
		       CN6XXX_SLI_INT_ENB64_PORT1,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
	len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
		       octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));

	/* PCI Output queue registers */
	for (i = 0; i < oct->num_oqs; i++) {
		reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
		len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
		len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}
	reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
	len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
		       reg, octeon_read_csr(oct, reg));
	reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
	len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
		       reg, octeon_read_csr(oct, reg));

	/* PCI Input queue registers */
	for (i = 0; i <= 3; i++) {
		u32 reg;

		reg = CN6XXX_SLI_IQ_DOORBELL(i);
		len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
		reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
		len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
			       reg, i, octeon_read_csr(oct, reg));
	}

	/* PCI DMA registers */

	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
		       CN6XXX_DMA_CNT(0),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(0),
		       octeon_read_csr(oct, reg));

	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
		       CN6XXX_DMA_CNT(1),
		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
		       CN6XXX_DMA_PKT_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));
	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
		       CN6XXX_DMA_TIME_INT_LEVEL(1),
		       octeon_read_csr(oct, reg));

	/* PCI Index registers */

	len += sprintf(s + len, "\n");

	for (i = 0; i < 16; i++) {
		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
	}

	return len;
}

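/* cn6xxx_read_config_reg - dump selected PCI config space dwords.
 * Reads config dwords 0-13 and 30-34 with pci_read_config_dword() and
 * appends them to the dump buffer, returning the number of characters
 * written.
 */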
static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
{
	u32 val;
	int i, len = 0;

	/* PCI CONFIG Registers */

	len += sprintf(s + len,
		       "\n\t Octeon Config space Registers\n\n");

	for (i = 0; i <= 13; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	for (i = 30; i <= 34; i++) {
		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
			       (i * 4), i, val);
	}

	return len;
}

/* Return register dump to the user app. */
static void lio_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *regbuf)
{
	struct lio *lio = GET_LIO(dev);
	int len = 0;
	struct octeon_device *oct = lio->oct_dev;

	memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
	regs->version = OCT_ETHTOOL_REGSVER;

	switch (oct->chip_id) {
	/* case OCTEON_CN73XX: Todo */
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		len += cn6xxx_read_csr_reg(regbuf + len, oct);
		len += cn6xxx_read_config_reg(regbuf + len, oct);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
			__func__, oct->chip_id);
	}
}

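/* lio_get_priv_flags - report the driver's private flag bits
 * ("ethtool --show-priv-flags"), as cached in oct_dev->priv_flags.
 */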
static u32 lio_get_priv_flags(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->oct_dev->priv_flags;
}

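/* lio_set_priv_flags - apply flags from "ethtool --set-priv-flags".
 * Only the OCT_PRIV_FLAG_TX_BYTES bit is acted on here; lio_set_priv_flag()
 * records whether interrupts should be raised based on TX byte counts
 * (see intr_by_tx_bytes below).
 */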
static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct lio *lio = GET_LIO(netdev);
	bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));

	lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
			  intr_by_tx_bytes);
	return 0;
}

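/* ethtool operations exported for LiquidIO PF network interfaces.  Link
 * settings, ring/channel/coalesce parameters, register and EEPROM dumps,
 * statistics, private flags and timestamping info are all routed through
 * the handlers defined above and earlier in this file.
 */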
static const struct ethtool_ops lio_ethtool_ops = {
	.get_settings = lio_get_settings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = lio_get_drvinfo,
	.get_ringparam = lio_ethtool_get_ringparam,
	.get_channels = lio_ethtool_get_channels,
	.set_phys_id = lio_set_phys_id,
	.get_eeprom_len = lio_get_eeprom_len,
	.get_eeprom = lio_get_eeprom,
	.get_strings = lio_get_strings,
	.get_ethtool_stats = lio_get_ethtool_stats,
	.get_pauseparam = lio_get_pauseparam,
	.get_regs_len = lio_get_regs_len,
	.get_regs = lio_get_regs,
	.get_msglevel = lio_get_msglevel,
	.set_msglevel = lio_set_msglevel,
	.get_sset_count = lio_get_sset_count,
	.nway_reset = lio_nway_reset,
	.set_settings = lio_set_settings,
	.get_coalesce = lio_get_intr_coalesce,
	.set_coalesce = lio_set_intr_coalesce,
	.get_priv_flags = lio_get_priv_flags,
	.set_priv_flags = lio_set_priv_flags,
	.get_ts_info = lio_get_ts_info,
};

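/* liquidio_set_ethtool_ops - attach the ethtool handlers to a netdev.
 * Called from the main PF driver during interface setup (typically just
 * before register_netdev()).
 */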
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &lio_ethtool_ops;
}