blob: a060586f33b7034647cb83ef18f169f0c0ec72da [file] [log] [blame]
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001/**********************************************************************
2* Author: Cavium, Inc.
3*
4* Contact: support@cavium.com
5* Please include "LiquidIO" in the subject.
6*
7* Copyright (c) 2003-2015 Cavium, Inc.
8*
9* This file is free software; you can redistribute it and/or modify
10* it under the terms of the GNU General Public License, Version 2, as
11* published by the Free Software Foundation.
12*
13* This file is distributed in the hope that it will be useful, but
14* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16* NONINFRINGEMENT. See the GNU General Public License for more
17* details.
18*
19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information
21**********************************************************************/
22#include <linux/version.h>
23#include <linux/netdevice.h>
24#include <linux/net_tstamp.h>
25#include <linux/ethtool.h>
26#include <linux/dma-mapping.h>
27#include <linux/pci.h>
28#include "octeon_config.h"
29#include "liquidio_common.h"
30#include "octeon_droq.h"
31#include "octeon_iq.h"
32#include "response_manager.h"
33#include "octeon_device.h"
34#include "octeon_nic.h"
35#include "octeon_main.h"
36#include "octeon_network.h"
37#include "cn66xx_regs.h"
38#include "cn66xx_device.h"
39#include "cn68xx_regs.h"
40#include "cn68xx_device.h"
41#include "liquidio_image.h"
42
/* Pull firmware/MAC link statistics into oct->link_stats; defined elsewhere
 * in this file (forward declaration needed by lio_get_ethtool_stats()).
 */
static int octnet_get_link_stats(struct net_device *netdev);
44
/* Per-request context for an MDIO45 soft command: the issuing thread sleeps
 * on 'wc' until octnet_mdio_resp_callback() flips 'cond' and wakes it.
 */
struct oct_mdio_cmd_context {
	int octeon_id;		/* device id; resolved back via lio_get_device() */
	wait_queue_head_t wc;	/* wait queue the issuer sleeps on */
	int cond;		/* completion flag: 0 = pending, 1 = ok, -1 = error */
};
50
/* Response buffer filled by firmware for an MDIO45 soft command. */
struct oct_mdio_cmd_resp {
	u64 rh;			/* NOTE(review): presumably the response header word — confirm */
	struct oct_mdio_cmd resp;	/* echoed command; value1 holds the read result */
	u64 status;		/* nonzero on failure (checked by the issuer) */
};

#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
58
/* Octeon's interface mode of operation.
 * Only XAUI, RXAUI and XFI are recognized by lio_get_settings() below;
 * all other modes are reported as an unknown link interface.
 */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};
82
/* Element count of a true array.  NOTE(review): duplicates the kernel's
 * ARRAY_SIZE(), which is what the rest of this file actually uses; this
 * macro appears unused in the visible code.
 */
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
/* Size in bytes of the buffer reported for ethtool register dumps */
#define OCT_ETHTOOL_REGDUMP_LEN 4096
/* Register-dump layout version reported to ethtool */
#define OCT_ETHTOOL_REGSVER 1
86
/* statistics of PF.
 * Order is ABI: lio_get_ethtool_stats() fills data[] in exactly this order,
 * and these names are exposed to userspace ethtool, so existing entries
 * (including the "multi_collison"/"deferal" spellings) must not be changed.
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",	/*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
			 *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	/* firmware tx-path counters */
	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	/* MAC tx counters (BGX CMR registers) */
	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	/* firmware rx-path counters */
	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

	/* LRO counters */
	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	/* MAC rx counters */
	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};
159
/* statistics of host tx queue.
 * One instance of these names is emitted per active instruction queue
 * (prefixed "tx-<q>-"); order must match the per-IQ fill loop in
 * lio_get_ethtool_stats().
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",		/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"vxlan",
	"txq_restart",
};
177
/* statistics of host rx queue.
 * One instance per active output (DROQ) queue, prefixed "rx-<q>-"; order
 * must match the per-OQ fill loop in lio_get_ethtool_stats().
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",		/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",		/*oct->droq[oq_no]->stats.rx_dropped+
				 *oct->droq[oq_no]->stats.dropped_nodispatch+
				 *oct->droq[oq_no]->stats.dropped_toomany+
				 *oct->droq[oq_no]->stats.dropped_nomem
				 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"vxlan",
	"buffer_alloc_failure",
};
197
/* Bit flags for NIC commands (autonegotiation / PHY enable). */
#define OCTNIC_NCMD_AUTONEG_ON 0x1
#define OCTNIC_NCMD_PHY_ON 0x2
200
/* ethtool get_settings (legacy struct ethtool_cmd API).
 * Reports fixed 10G-fibre capabilities for XAUI/RXAUI/XFI interface modes;
 * any other mode only logs an error and leaves the capability fields
 * untouched.  Speed/duplex come from the cached link state regardless.
 * Always returns 0.
 */
static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;

	linfo = &lio->linfo;

	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
		/* No autonegotiation on these fibre modes */
		ecmd->port = PORT_FIBRE;
		ecmd->supported =
			(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
			 SUPPORTED_Pause);
		ecmd->advertising =
			(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
		ecmd->transceiver = XCVR_EXTERNAL;
		ecmd->autoneg = AUTONEG_DISABLE;

	} else {
		dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
			linfo->link.s.if_mode);
	}

	if (linfo->link.s.link_up) {
		/* Link up: report the firmware-provided speed/duplex */
		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
		ecmd->duplex = linfo->link.s.duplex;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
236
237static void
238lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
239{
240 struct lio *lio;
241 struct octeon_device *oct;
242
243 lio = GET_LIO(netdev);
244 oct = lio->oct_dev;
245
246 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
247 strcpy(drvinfo->driver, "liquidio");
248 strcpy(drvinfo->version, LIQUIDIO_VERSION);
249 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
250 ETHTOOL_FWVERS_LEN);
251 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700252}
253
/* ethtool -l: report maximum and currently-configured queue counts.
 * Only CN6XXX chips are handled here; any other chip reports all zeros.
 */
static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		/* max queues from chip config, active counts per interface */
		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
}
276
277static int lio_get_eeprom_len(struct net_device *netdev)
278{
279 u8 buf[128];
280 struct lio *lio = GET_LIO(netdev);
281 struct octeon_device *oct_dev = lio->oct_dev;
282 struct octeon_board_info *board_info;
283 int len;
284
285 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
286 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
287 board_info->name, board_info->serial_number,
288 board_info->major, board_info->minor);
289
290 return len;
291}
292
293static int
294lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
295 u8 *bytes)
296{
297 struct lio *lio = GET_LIO(netdev);
298 struct octeon_device *oct_dev = lio->oct_dev;
299 struct octeon_board_info *board_info;
300 int len;
301
302 if (eeprom->offset != 0)
303 return -EINVAL;
304
305 eeprom->magic = oct_dev->pci_dev->vendor;
306 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
307 len =
308 sprintf((char *)bytes,
309 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
310 board_info->name, board_info->serial_number,
311 board_info->major, board_info->minor);
312
313 return 0;
314}
315
316static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
317{
318 struct lio *lio = GET_LIO(netdev);
319 struct octeon_device *oct = lio->oct_dev;
320 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700321 int ret = 0;
322
323 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
324
325 nctrl.ncmd.u64 = 0;
326 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700327 nctrl.ncmd.s.param1 = addr;
328 nctrl.ncmd.s.param2 = val;
329 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700330 nctrl.wait_time = 100;
331 nctrl.netpndev = (u64)netdev;
332 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
333
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700334 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700335 if (ret < 0) {
336 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
337 return -EINVAL;
338 }
339
340 return 0;
341}
342
/* Callback for when mdio command response arrives.
 * Records success (cond = 1) or failure (cond = -1) in the request context
 * and wakes the thread sleeping in octnet_mdio45_access().
 */
static void octnet_mdio_resp_callback(struct octeon_device *oct,
				      u32 status,
				      void *buf)
{
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;

	/* Re-resolve the device from the id stored in the context */
	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
	} else {
		ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
	}
	wake_up_interruptible(&mdio_cmd_ctx->wc);
}
366
/* This routine provides PHY access routines for
 * mdio clause45.
 *
 * @param lio    interface private data
 * @param op     0 = read, nonzero = write (value1 carries the write data)
 * @param loc    MDIO register address
 * @param value  in: value to write (write op); out: value read (read op)
 * @return 0 on success; -ENOMEM, -EBUSY or -EINVAL on failure
 *
 * Sends an MDIO soft command to firmware and sleeps until
 * octnet_mdio_resp_callback() signals completion via the context's cond
 * flag (1 = response ok, -1 = firmware reported failure).
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	/* One allocation carries command, response and context buffers */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	/* Command payload goes to firmware in big-endian 64-bit words */
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			/* Swap the response back to host byte order before use */
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
445
/* ethtool -p (identify): blink the port LED.
 * CN66XX drives the LED through a Vitesse PHY GPIO; CN68XX uses MDIO45
 * beacon registers (save the old values on ACTIVE, restore on INACTIVE).
 * Returning 2 from ETHTOOL_ID_ACTIVE asks the ethtool core to cycle
 * ON/OFF at 2 toggles per second; CN68XX beacons autonomously, so its
 * ON/OFF states are rejected with -EINVAL.
 */
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_BEACON_ADDR,
						     &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_CTRL_ADDR,
						     &value);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);

		} else if (oct->chip_id == OCTEON_CN68XX) {
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
547
/* ethtool -g: report ring sizes.
 * Only CN6XXX descriptor counts are known here.  For jumbo MTUs the rx
 * ring is reported in the "jumbo" fields instead of the normal ones,
 * with the other set zeroed.
 */
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	}

	if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
		/* Jumbo MTU: report the rx ring through the jumbo fields */
		ering->rx_pending = 0;
		ering->rx_max_pending = 0;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = rx_pending;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = rx_max_pending;
	} else {
		ering->rx_pending = rx_pending;
		ering->rx_max_pending = rx_max_pending;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = 0;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = 0;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
}
585
586static u32 lio_get_msglevel(struct net_device *netdev)
587{
588 struct lio *lio = GET_LIO(netdev);
589
590 return lio->msg_enable;
591}
592
593static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
594{
595 struct lio *lio = GET_LIO(netdev);
596
597 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
598 if (msglvl & NETIF_MSG_HW)
599 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700600 OCTNET_CMD_VERBOSE_ENABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700601 else
602 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700603 OCTNET_CMD_VERBOSE_DISABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700604 }
605
606 lio->msg_enable = msglvl;
607}
608
609static void
610lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
611{
612 /* Notes: Not supporting any auto negotiation in these
613 * drivers. Just report pause frame support.
614 */
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700615 struct lio *lio = GET_LIO(netdev);
616 struct octeon_device *oct = lio->oct_dev;
617
618 pause->autoneg = 0;
619
620 pause->tx_pause = oct->tx_pause;
621 pause->rx_pause = oct->rx_pause;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700622}
623
/* ethtool -S: fill data[] with every counter named in oct_stats_strings,
 * then per-IQ counters (oct_iq_stats_strings) for each active instruction
 * queue and per-OQ counters (oct_droq_stats_strings) for each active
 * output queue.  The fill order here is ABI with those string tables.
 * Refreshes netdev stats and firmware link stats before reading.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	/* Refresh cached counters before snapshotting them */
	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/*sum of oct->droq[oq_no]->stats->rx_dropped +
	 *oct->droq[oq_no]->stats->dropped_nodispatch +
	 *oct->droq[oq_no]->stats->dropped_toomany +
	 *oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/*data[i++] = CVM_CAST64(stats->multicast); */
	/*data[i++] = CVM_CAST64(stats->collisions); */

	/* firmware tx stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 *fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* mac tx statistics */
	/*CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/*CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/*CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/*CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/*CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/*CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_timer
	 */
	/* intrmod: packet forward rate */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/*CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/*CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/*CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/*lio->link_changes*/
	data[i++] = CVM_CAST64(lio->link_changes);

	/* TX -- lio_update_stats(lio); */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		/* Skip instruction queues not in use */
		if (!(oct_dev->io_qmask.iq & (1ULL << j)))
			continue;
		/*packets to network port*/
		/*# of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/*# of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/*# of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/*# of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/*XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/*instruction to firmware: data and control */
		/*# of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/*# of instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/*# of instructions could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/*bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/*tso request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/*vxlan request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/*txq restart*/
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	/* for (j = 0; j < oct_dev->num_oqs; j++) { */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		/* Skip output queues not in use */
		if (!(oct_dev->io_qmask.oq & (1ULL << j)))
			continue;

		/*packets send to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
889
/* ethtool string-set callback: emit stat names into 'data', one
 * ETH_GSTRING_LEN slot per name, in the same order lio_get_ethtool_stats()
 * fills its values — global names first, then per-active-IQ names
 * ("tx-<q>-...") and per-active-OQ names ("rx-<q>-...").
 */
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			/* Skip instruction queues not in use */
			if (!(oct_dev->io_qmask.iq & (1ULL << i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		/* for (i = 0; i < oct_dev->num_oqs; i++) { */
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			/* Skip output queues not in use */
			if (!(oct_dev->io_qmask.oq & (1ULL << i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
934
935static int lio_get_sset_count(struct net_device *netdev, int sset)
936{
937 struct lio *lio = GET_LIO(netdev);
938 struct octeon_device *oct_dev = lio->oct_dev;
939
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700940 switch (sset) {
941 case ETH_SS_STATS:
942 return (ARRAY_SIZE(oct_stats_strings) +
943 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
944 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
945 default:
946 return -EOPNOTSUPP;
947 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700948}
949
/* ethtool --show-coalesce handler.
 *
 * Static (non-adaptive) rx settings are read back from the CN6xxx chip
 * configuration; the tx threshold comes from the first instruction
 * queue's fill_threshold.  When adaptive rx moderation (intrmod) is
 * enabled, the adaptive parameters are reported from oct->intrmod
 * instead.  Returns 0, or -EINVAL for an unrecognized chip.
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		/* Only report the static values when adaptive rx
		 * moderation is off; otherwise the adaptive block
		 * below is authoritative.
		 */
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}

		/* Tx threshold is shared by all queues; queue 0 is
		 * representative.
		 */
		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	if (intrmod_cfg->rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	return 0;
}
1001
1002/* Callback function for intrmod */
1003static void octnet_intrmod_callback(struct octeon_device *oct_dev,
1004 u32 status,
1005 void *ptr)
1006{
1007 struct oct_intrmod_cmd *cmd = ptr;
1008 struct octeon_soft_command *sc = cmd->sc;
1009
1010 oct_dev = cmd->oct_dev;
1011
1012 if (status)
1013 dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
1014 CVM_CAST64(status));
1015 else
1016 dev_info(&oct_dev->pci_dev->dev,
1017 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001018 oct_dev->intrmod.rx_enable);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001019
1020 octeon_free_soft_command(oct_dev, sc);
1021}
1022
/* Push an interrupt-moderation configuration to the NIC firmware.
 *
 * Allocates a soft command carrying a byte-swapped copy of @intr_cfg,
 * sends it on the first tx queue and returns without waiting; the
 * result is reported asynchronously by octnet_intrmod_callback(),
 * which also frees the soft command.  On send failure the command is
 * freed here.  Returns 0 on successful submission, -ENOMEM or -EINVAL
 * otherwise.
 */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cmd *cmd;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command: dptr holds the config payload, ctxptr the
	 * per-command context handed to the callback.
	 */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_cmd));

	if (!sc)
		return -ENOMEM;

	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	/* Firmware expects the config in big-endian 8-byte words. */
	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
	cmd->sc = sc;
	cmd->cfg = cfg;
	cmd->oct_dev = oct_dev;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = cmd;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	return 0;
}
1069
/* Completion callback for the OPCODE_NIC_PORT_STATS soft command sent
 * by octnet_get_link_stats().
 *
 * On success the firmware response is byte-swapped in place and every
 * rx/tx counter is copied into oct_dev->link_stats; resp->status is
 * set to 1.  On timeout or firmware error resp->status is set to -1.
 * Either way the waiter blocked in octnet_get_link_stats() is woken
 * via ctrl->complete.
 */
void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
		sc->virtrptr;
	struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
		sc->ctxptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;

	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
		/* Response arrives big-endian; swap before reading. */
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times lRO of packet aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision*/
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collision*/
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		resp->status = -1;
	}
	complete(&ctrl->complete);
}
1165
/* Fetch link-level and firmware port statistics from the NIC into
 * oct_dev->link_stats (the copy is done by octnet_nic_stats_callback).
 * Blocks up to one second waiting for the firmware response.
 * Returns 0 on success; -ENOMEM on allocation failure; -EINVAL on
 * send failure, timeout, or firmware error.
 */
static int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;

	int retval;

	/* Alloc soft command: no data payload, response buffer for the
	 * stats, ctxptr for the completion context.
	 */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	/* Zeroed status lets us detect "no response yet" below. */
	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500; /*in milli seconds*/

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* NOTE(review): if this times out (callback never ran), the soft
	 * command is freed below while the response could still be in
	 * flight — confirm the response manager cancels the request at
	 * sc->wait_time before this 1000 ms deadline expires.
	 */
	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));

	if (resp->status != 1) {
		octeon_free_soft_command(oct_dev, sc);

		return -EINVAL;
	}

	octeon_free_soft_command(oct_dev, sc);

	return 0;
}
1223
/* Enable/Disable auto interrupt Moderation.
 *
 * Copies the adaptive-coalescing parameters from the ethtool request
 * into oct->intrmod, substituting the driver's LIO_INTRMOD_* defaults
 * for any field userspace left at 0, then pushes the whole config to
 * firmware via octnet_set_intrmod_cfg().  oct->intrmod.rx_enable /
 * tx_enable must already reflect the requested adaptive state (set by
 * lio_set_intr_coalesce()).  Returns octnet_set_intrmod_cfg()'s result.
 */
static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
				 *intr_coal)
{
	int ret = 0;
	struct octeon_device *oct = lio->oct_dev;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	/* Parameters shared by rx and tx adaptive moderation. */
	if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
		if (intr_coal->rate_sample_interval)
			intrmod_cfg->check_intrvl =
				intr_coal->rate_sample_interval;
		else
			intrmod_cfg->check_intrvl =
				LIO_INTRMOD_CHECK_INTERVAL;

		if (intr_coal->pkt_rate_high)
			intrmod_cfg->maxpkt_ratethr =
				intr_coal->pkt_rate_high;
		else
			intrmod_cfg->maxpkt_ratethr =
				LIO_INTRMOD_MAXPKT_RATETHR;

		if (intr_coal->pkt_rate_low)
			intrmod_cfg->minpkt_ratethr =
				intr_coal->pkt_rate_low;
		else
			intrmod_cfg->minpkt_ratethr =
				LIO_INTRMOD_MINPKT_RATETHR;
	}
	/* Rx-only trigger thresholds (count and timer, high and low). */
	if (oct->intrmod.rx_enable) {
		if (intr_coal->rx_max_coalesced_frames_high)
			intrmod_cfg->rx_maxcnt_trigger =
				intr_coal->rx_max_coalesced_frames_high;
		else
			intrmod_cfg->rx_maxcnt_trigger =
				LIO_INTRMOD_RXMAXCNT_TRIGGER;

		if (intr_coal->rx_coalesce_usecs_high)
			intrmod_cfg->rx_maxtmr_trigger =
				intr_coal->rx_coalesce_usecs_high;
		else
			intrmod_cfg->rx_maxtmr_trigger =
				LIO_INTRMOD_RXMAXTMR_TRIGGER;

		if (intr_coal->rx_coalesce_usecs_low)
			intrmod_cfg->rx_mintmr_trigger =
				intr_coal->rx_coalesce_usecs_low;
		else
			intrmod_cfg->rx_mintmr_trigger =
				LIO_INTRMOD_RXMINTMR_TRIGGER;

		if (intr_coal->rx_max_coalesced_frames_low)
			intrmod_cfg->rx_mincnt_trigger =
				intr_coal->rx_max_coalesced_frames_low;
		else
			intrmod_cfg->rx_mincnt_trigger =
				LIO_INTRMOD_RXMINCNT_TRIGGER;
	}
	/* Tx-only trigger thresholds (count, high and low). */
	if (oct->intrmod.tx_enable) {
		if (intr_coal->tx_max_coalesced_frames_high)
			intrmod_cfg->tx_maxcnt_trigger =
				intr_coal->tx_max_coalesced_frames_high;
		else
			intrmod_cfg->tx_maxcnt_trigger =
				LIO_INTRMOD_TXMAXCNT_TRIGGER;
		if (intr_coal->tx_max_coalesced_frames_low)
			intrmod_cfg->tx_mincnt_trigger =
				intr_coal->tx_max_coalesced_frames_low;
		else
			intrmod_cfg->tx_mincnt_trigger =
				LIO_INTRMOD_TXMINCNT_TRIGGER;
	}

	ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);

	return ret;
}
1304
1305static int
1306oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
1307{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001308 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001309 u32 rx_max_coalesced_frames;
1310
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001311 /* Config Cnt based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001312 switch (oct->chip_id) {
1313 case OCTEON_CN68XX:
1314 case OCTEON_CN66XX: {
1315 struct octeon_cn6xxx *cn6xxx =
1316 (struct octeon_cn6xxx *)oct->chip;
1317
1318 if (!intr_coal->rx_max_coalesced_frames)
1319 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1320 else
1321 rx_max_coalesced_frames =
1322 intr_coal->rx_max_coalesced_frames;
1323 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1324 rx_max_coalesced_frames);
1325 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1326 break;
1327 }
1328 default:
1329 return -EINVAL;
1330 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001331 return 0;
1332}
1333
1334static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
1335 *intr_coal)
1336{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001337 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001338 u32 time_threshold, rx_coalesce_usecs;
1339
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001340 /* Config Time based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001341 switch (oct->chip_id) {
1342 case OCTEON_CN68XX:
1343 case OCTEON_CN66XX: {
1344 struct octeon_cn6xxx *cn6xxx =
1345 (struct octeon_cn6xxx *)oct->chip;
1346 if (!intr_coal->rx_coalesce_usecs)
1347 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1348 else
1349 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001350
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001351 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1352 rx_coalesce_usecs);
1353 octeon_write_csr(oct,
1354 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1355 time_threshold);
1356
1357 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1358 break;
1359 }
1360 default:
1361 return -EINVAL;
1362 }
1363
1364 return 0;
1365}
1366
1367static int
1368oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
1369 __attribute__((unused)))
1370{
1371 struct octeon_device *oct = lio->oct_dev;
1372
1373 /* Config Cnt based interrupt values */
1374 switch (oct->chip_id) {
1375 case OCTEON_CN68XX:
1376 case OCTEON_CN66XX:
1377 break;
1378 default:
1379 return -EINVAL;
1380 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001381 return 0;
1382}
1383
1384static int lio_set_intr_coalesce(struct net_device *netdev,
1385 struct ethtool_coalesce *intr_coal)
1386{
1387 struct lio *lio = GET_LIO(netdev);
1388 int ret;
1389 struct octeon_device *oct = lio->oct_dev;
1390 u32 j, q_no;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001391 int db_max, db_min;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001392
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001393 switch (oct->chip_id) {
1394 case OCTEON_CN68XX:
1395 case OCTEON_CN66XX:
1396 db_min = CN6XXX_DB_MIN;
1397 db_max = CN6XXX_DB_MAX;
1398 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1399 (intr_coal->tx_max_coalesced_frames <= db_max)) {
1400 for (j = 0; j < lio->linfo.num_txpciq; j++) {
1401 q_no = lio->linfo.txpciq[j].s.q_no;
1402 oct->instr_queue[q_no]->fill_threshold =
1403 intr_coal->tx_max_coalesced_frames;
1404 }
1405 } else {
1406 dev_err(&oct->pci_dev->dev,
1407 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1408 intr_coal->tx_max_coalesced_frames, db_min,
1409 db_max);
1410 return -EINVAL;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001411 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001412 break;
1413 default:
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001414 return -EINVAL;
1415 }
1416
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001417 oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1418 oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001419
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001420 ret = oct_cfg_adaptive_intr(lio, intr_coal);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001421
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001422 if (!intr_coal->use_adaptive_rx_coalesce) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001423 ret = oct_cfg_rx_intrtime(lio, intr_coal);
1424 if (ret)
1425 goto ret_intrmod;
1426
1427 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
1428 if (ret)
1429 goto ret_intrmod;
1430 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001431 if (!intr_coal->use_adaptive_tx_coalesce) {
1432 ret = oct_cfg_tx_intrcnt(lio, intr_coal);
1433 if (ret)
1434 goto ret_intrmod;
1435 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001436
1437 return 0;
1438ret_intrmod:
1439 return ret;
1440}
1441
/* ethtool --show-time-stamping handler: report timestamping modes and
 * the PTP hardware clock index.  Hardware capabilities are only
 * advertised when built with PTP_HARDWARE_TIMESTAMPING.
 */
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	/* NOTE(review): SOF_TIMESTAMPING_TX_SOFTWARE is only advertised
	 * under the hardware-PTP build — confirm tx software
	 * timestamping really is unavailable without it.
	 */
	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	/* Expose the registered PTP clock, if any, so userspace can find
	 * the matching /dev/ptpN device.
	 */
	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}
1473
/* ethtool set_settings handler: push autoneg/speed/duplex changes to
 * the firmware via a NIC control packet.  Only 10/100 Mbps with
 * half/full duplex is accepted when autoneg is off.  Returns 0 on
 * success, -EINVAL for invalid/unsupported settings, -1 if the
 * control packet could not be sent.
 */
static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	/* get the link info */
	linfo = &lio->linfo;

	if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* NOTE(review): this reads the legacy 16-bit ecmd->speed field
	 * rather than ethtool_cmd_speed(ecmd) — confirm no supported
	 * speed needs speed_hi.
	 */
	if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
						  ecmd->speed != SPEED_10) ||
						 (ecmd->duplex != DUPLEX_HALF &&
						  ecmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
	 * as they operate at fixed Speed and Duplex settings
	 */
	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
		dev_info(&oct->pci_dev->dev,
			 "Autonegotiation, duplex and speed settings cannot be modified.\n");
		return -EINVAL;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 1000;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
	 * to SE core application using ncmd.s.more & ncmd.s.param
	 */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg ON */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
				     OCTNIC_NCMD_AUTONEG_ON;
		nctrl.ncmd.s.param1 = ecmd->advertising;
	} else {
		/* Autoneg OFF */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;

		nctrl.ncmd.s.param2 = ecmd->duplex;

		nctrl.ncmd.s.param1 = ecmd->speed;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
		return -1;
	}

	return 0;
}
1539
1540static int lio_nway_reset(struct net_device *netdev)
1541{
1542 if (netif_running(netdev)) {
1543 struct ethtool_cmd ecmd;
1544
1545 memset(&ecmd, 0, sizeof(struct ethtool_cmd));
1546 ecmd.autoneg = 0;
1547 ecmd.speed = 0;
1548 ecmd.duplex = 0;
1549 lio_set_settings(netdev, &ecmd);
1550 }
1551 return 0;
1552}
1553
/* Return register dump len.  Fixed-size buffer: lio_get_regs()
 * zero-fills it and writes at most this many bytes.
 */
static int lio_get_regs_len(struct net_device *dev)
{
	return OCT_ETHTOOL_REGDUMP_LEN;
}
1559
1560static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
1561{
1562 u32 reg;
1563 int i, len = 0;
1564
1565 /* PCI Window Registers */
1566
1567 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
1568 reg = CN6XXX_WIN_WR_ADDR_LO;
1569 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
1570 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
1571 reg = CN6XXX_WIN_WR_ADDR_HI;
1572 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
1573 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
1574 reg = CN6XXX_WIN_RD_ADDR_LO;
1575 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
1576 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
1577 reg = CN6XXX_WIN_RD_ADDR_HI;
1578 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
1579 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
1580 reg = CN6XXX_WIN_WR_DATA_LO;
1581 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
1582 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
1583 reg = CN6XXX_WIN_WR_DATA_HI;
1584 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
1585 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
1586 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
1587 CN6XXX_WIN_WR_MASK_REG,
1588 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
1589
1590 /* PCI Interrupt Register */
1591 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
1592 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
1593 CN6XXX_SLI_INT_ENB64_PORT0));
1594 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
1595 CN6XXX_SLI_INT_ENB64_PORT1,
1596 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
1597 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
1598 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
1599
1600 /* PCI Output queue registers */
1601 for (i = 0; i < oct->num_oqs; i++) {
1602 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
1603 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
1604 reg, i, octeon_read_csr(oct, reg));
1605 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
1606 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
1607 reg, i, octeon_read_csr(oct, reg));
1608 }
1609 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
1610 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
1611 reg, octeon_read_csr(oct, reg));
1612 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
1613 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
1614 reg, octeon_read_csr(oct, reg));
1615
1616 /* PCI Input queue registers */
1617 for (i = 0; i <= 3; i++) {
1618 u32 reg;
1619
1620 reg = CN6XXX_SLI_IQ_DOORBELL(i);
1621 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
1622 reg, i, octeon_read_csr(oct, reg));
1623 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
1624 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
1625 reg, i, octeon_read_csr(oct, reg));
1626 }
1627
1628 /* PCI DMA registers */
1629
1630 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
1631 CN6XXX_DMA_CNT(0),
1632 octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
1633 reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
1634 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
1635 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
1636 reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
1637 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
1638 CN6XXX_DMA_TIME_INT_LEVEL(0),
1639 octeon_read_csr(oct, reg));
1640
1641 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
1642 CN6XXX_DMA_CNT(1),
1643 octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
1644 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1645 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
1646 CN6XXX_DMA_PKT_INT_LEVEL(1),
1647 octeon_read_csr(oct, reg));
1648 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1649 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
1650 CN6XXX_DMA_TIME_INT_LEVEL(1),
1651 octeon_read_csr(oct, reg));
1652
1653 /* PCI Index registers */
1654
1655 len += sprintf(s + len, "\n");
1656
1657 for (i = 0; i < 16; i++) {
1658 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
1659 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
1660 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
1661 }
1662
1663 return len;
1664}
1665
1666static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
1667{
1668 u32 val;
1669 int i, len = 0;
1670
1671 /* PCI CONFIG Registers */
1672
1673 len += sprintf(s + len,
1674 "\n\t Octeon Config space Registers\n\n");
1675
1676 for (i = 0; i <= 13; i++) {
1677 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1678 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1679 (i * 4), i, val);
1680 }
1681
1682 for (i = 30; i <= 34; i++) {
1683 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1684 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1685 (i * 4), i, val);
1686 }
1687
1688 return len;
1689}
1690
1691/* Return register dump user app. */
1692static void lio_get_regs(struct net_device *dev,
1693 struct ethtool_regs *regs, void *regbuf)
1694{
1695 struct lio *lio = GET_LIO(dev);
1696 int len = 0;
1697 struct octeon_device *oct = lio->oct_dev;
1698
1699 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
1700 regs->version = OCT_ETHTOOL_REGSVER;
1701
1702 switch (oct->chip_id) {
1703 /* case OCTEON_CN73XX: Todo */
1704 case OCTEON_CN68XX:
1705 case OCTEON_CN66XX:
1706 len += cn6xxx_read_csr_reg(regbuf + len, oct);
1707 len += cn6xxx_read_config_reg(regbuf + len, oct);
1708 break;
1709 default:
1710 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
1711 __func__, oct->chip_id);
1712 }
1713}
1714
Raghu Vatsavayif5a20472016-06-21 22:53:14 -07001715static u32 lio_get_priv_flags(struct net_device *netdev)
1716{
1717 struct lio *lio = GET_LIO(netdev);
1718
1719 return lio->oct_dev->priv_flags;
1720}
1721
1722static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
1723{
1724 struct lio *lio = GET_LIO(netdev);
1725 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
1726
1727 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
1728 intr_by_tx_bytes);
1729 return 0;
1730}
1731
/* ethtool operations for LiquidIO NIC netdevs; attached by
 * liquidio_set_ethtool_ops() at interface setup.
 */
static const struct ethtool_ops lio_ethtool_ops = {
	.get_settings		= lio_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_phys_id		= lio_set_phys_id,
	.get_eeprom_len		= lio_get_eeprom_len,
	.get_eeprom		= lio_get_eeprom,
	.get_strings		= lio_get_strings,
	.get_ethtool_stats	= lio_get_ethtool_stats,
	.get_pauseparam		= lio_get_pauseparam,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_set_msglevel,
	.get_sset_count		= lio_get_sset_count,
	.nway_reset		= lio_nway_reset,
	.set_settings		= lio_set_settings,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_priv_flags		= lio_get_priv_flags,
	.set_priv_flags		= lio_set_priv_flags,
	.get_ts_info		= lio_get_ts_info,
};
1757
/* Attach the LiquidIO ethtool_ops to @netdev; called by the main
 * driver when the interface is created.
 */
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &lio_ethtool_ops;
}