blob: 03bfa9771e4dec3895d9020043a34336f717da60 [file] [log] [blame]
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001/**********************************************************************
2* Author: Cavium, Inc.
3*
4* Contact: support@cavium.com
5* Please include "LiquidIO" in the subject.
6*
7* Copyright (c) 2003-2015 Cavium, Inc.
8*
9* This file is free software; you can redistribute it and/or modify
10* it under the terms of the GNU General Public License, Version 2, as
11* published by the Free Software Foundation.
12*
13* This file is distributed in the hope that it will be useful, but
14* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16* NONINFRINGEMENT. See the GNU General Public License for more
17* details.
18*
19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information
21**********************************************************************/
22#include <linux/version.h>
23#include <linux/netdevice.h>
24#include <linux/net_tstamp.h>
25#include <linux/ethtool.h>
26#include <linux/dma-mapping.h>
27#include <linux/pci.h>
28#include "octeon_config.h"
29#include "liquidio_common.h"
30#include "octeon_droq.h"
31#include "octeon_iq.h"
32#include "response_manager.h"
33#include "octeon_device.h"
34#include "octeon_nic.h"
35#include "octeon_main.h"
36#include "octeon_network.h"
37#include "cn66xx_regs.h"
38#include "cn66xx_device.h"
39#include "cn68xx_regs.h"
40#include "cn68xx_device.h"
41#include "liquidio_image.h"
42
Raghu Vatsavayi1f164712016-06-21 22:53:11 -070043static int octnet_get_link_stats(struct net_device *netdev);
44
/* Completion context for an asynchronous MDIO soft command: the issuer
 * sleeps on @wc until the response callback flips @cond.
 */
struct oct_mdio_cmd_context {
	int octeon_id;		/* device id; callback re-looks-up the octeon_device */
	wait_queue_head_t wc;	/* issuer waits here for the response */
	int cond;		/* 0 = pending, 1 = success, -1 = command failed */
};

/* Response buffer layout for an MDIO45 soft command. */
struct oct_mdio_cmd_resp {
	u64 rh;				/* response header -- presumably union octeon_rh; confirm */
	struct oct_mdio_cmd resp;	/* echoed command; value1 holds the value read */
	u64 status;			/* 0 on success (checked in octnet_mdio45_access()) */
};

#define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
58
/* Octeon's interface mode of operation, as reported by firmware in
 * linfo->link.s.if_mode (consumed by lio_get_settings()).
 * NOTE(review): values presumably must stay in sync with the firmware's
 * encoding -- confirm before reordering or inserting entries.
 */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};

/* NOTE(review): ARRAY_LENGTH duplicates the kernel's ARRAY_SIZE macro,
 * which this file already uses elsewhere.
 */
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN  4096
#define OCT_ETHTOOL_REGSVER  1
86
/* Names of the per-interface (PF) statistics, exported via "ethtool -S".
 * The order of entries here MUST match the order in which
 * lio_get_ethtool_stats() writes values into its data[] array.
 * The strings themselves are userspace-visible; do not rename (the
 * misspellings "collison"/"deferal" are kept for that reason).
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",   /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
			 *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};
155
/* Names of the per-TX-queue (host instruction queue) statistics.
 * Order MUST match the per-IQ section of lio_get_ethtool_stats();
 * lio_get_strings() prefixes each with "tx-<q>-".
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",		/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"txq_restart",
};
172
/* Names of the per-RX-queue (host DROQ) statistics.
 * Order MUST match the per-OQ section of lio_get_ethtool_stats();
 * lio_get_strings() prefixes each with "rx-<q>-".
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",		/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",		/*oct->droq[oq_no]->stats.rx_dropped+
				 *oct->droq[oq_no]->stats.dropped_nodispatch+
				 *oct->droq[oq_no]->stats.dropped_toomany+
				 *oct->droq[oq_no]->stats.dropped_nomem
				 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"buffer_alloc_failure",
};
191
192#define OCTNIC_NCMD_AUTONEG_ON 0x1
193#define OCTNIC_NCMD_PHY_ON 0x2
194
195static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
196{
197 struct lio *lio = GET_LIO(netdev);
198 struct octeon_device *oct = lio->oct_dev;
199 struct oct_link_info *linfo;
200
201 linfo = &lio->linfo;
202
Raghu Vatsavayi9eb60842016-06-21 22:53:12 -0700203 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
204 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
205 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700206 ecmd->port = PORT_FIBRE;
207 ecmd->supported =
208 (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
209 SUPPORTED_Pause);
210 ecmd->advertising =
211 (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
212 ecmd->transceiver = XCVR_EXTERNAL;
213 ecmd->autoneg = AUTONEG_DISABLE;
214
215 } else {
Raghu Vatsavayi9eb60842016-06-21 22:53:12 -0700216 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
217 linfo->link.s.if_mode);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700218 }
219
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700220 if (linfo->link.s.link_up) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700221 ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
222 ecmd->duplex = linfo->link.s.duplex;
223 } else {
224 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
225 ecmd->duplex = DUPLEX_UNKNOWN;
226 }
227
228 return 0;
229}
230
231static void
232lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
233{
234 struct lio *lio;
235 struct octeon_device *oct;
236
237 lio = GET_LIO(netdev);
238 oct = lio->oct_dev;
239
240 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
241 strcpy(drvinfo->driver, "liquidio");
242 strcpy(drvinfo->version, LIQUIDIO_VERSION);
243 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
244 ETHTOOL_FWVERS_LEN);
245 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700246}
247
248static void
249lio_ethtool_get_channels(struct net_device *dev,
250 struct ethtool_channels *channel)
251{
252 struct lio *lio = GET_LIO(dev);
253 struct octeon_device *oct = lio->oct_dev;
254 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
255
256 if (OCTEON_CN6XXX(oct)) {
257 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
258
259 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
260 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
261 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
262 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
263 }
264
265 channel->max_rx = max_rx;
266 channel->max_tx = max_tx;
267 channel->rx_count = rx_count;
268 channel->tx_count = tx_count;
269}
270
271static int lio_get_eeprom_len(struct net_device *netdev)
272{
273 u8 buf[128];
274 struct lio *lio = GET_LIO(netdev);
275 struct octeon_device *oct_dev = lio->oct_dev;
276 struct octeon_board_info *board_info;
277 int len;
278
279 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
280 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
281 board_info->name, board_info->serial_number,
282 board_info->major, board_info->minor);
283
284 return len;
285}
286
287static int
288lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
289 u8 *bytes)
290{
291 struct lio *lio = GET_LIO(netdev);
292 struct octeon_device *oct_dev = lio->oct_dev;
293 struct octeon_board_info *board_info;
294 int len;
295
296 if (eeprom->offset != 0)
297 return -EINVAL;
298
299 eeprom->magic = oct_dev->pci_dev->vendor;
300 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
301 len =
302 sprintf((char *)bytes,
303 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
304 board_info->name, board_info->serial_number,
305 board_info->major, board_info->minor);
306
307 return 0;
308}
309
310static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
311{
312 struct lio *lio = GET_LIO(netdev);
313 struct octeon_device *oct = lio->oct_dev;
314 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700315 int ret = 0;
316
317 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
318
319 nctrl.ncmd.u64 = 0;
320 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700321 nctrl.ncmd.s.param1 = addr;
322 nctrl.ncmd.s.param2 = val;
323 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700324 nctrl.wait_time = 100;
325 nctrl.netpndev = (u64)netdev;
326 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
327
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700328 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700329 if (ret < 0) {
330 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
331 return -EINVAL;
332 }
333
334 return 0;
335}
336
337/* Callback for when mdio command response arrives
338 */
339static void octnet_mdio_resp_callback(struct octeon_device *oct,
340 u32 status,
341 void *buf)
342{
343 struct oct_mdio_cmd_resp *mdio_cmd_rsp;
344 struct oct_mdio_cmd_context *mdio_cmd_ctx;
345 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
346
347 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
348 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
349
350 oct = lio_get_device(mdio_cmd_ctx->octeon_id);
351 if (status) {
352 dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
353 CVM_CAST64(status));
354 ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
355 } else {
356 ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
357 }
358 wake_up_interruptible(&mdio_cmd_ctx->wc);
359}
360
/* Perform an MDIO clause-45 PHY register access through a firmware
 * soft command.
 * @lio:   per-interface private data
 * @op:    non-zero for a write, zero for a read
 * @value: value to write when @op is set; receives the value read
 *         when @op is zero
 * @loc:   MDIO register address
 * Sleeps until the response callback fires; returns 0 on success or
 * -ENOMEM/-EBUSY/-EINVAL on failure.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	/* One allocation provides the request (virtdptr), response
	 * (virtrptr) and completion context (ctxptr) buffers.
	 */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	/* cond: 0 = pending; the callback sets 1 (ok) or -1 (failed). */
	ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	/* Swap to the device's 64-bit word order before sending. */
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			/* Swap the response back to host order before use. */
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			/* cond == 1 means the callback reported success. */
			if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
439
/* ethtool_ops::set_phys_id - identify the port by blinking its LED.
 * CN66XX drives the Vitesse PHY GPIO directly; CN68XX saves the LED
 * beacon/control registers over MDIO on ACTIVE, programs beacon mode,
 * and restores the saved values on INACTIVE.
 * NOTE(review): the positive return (2) from ETHTOOL_ID_ACTIVE
 * presumably asks the ethtool core to cycle ID_ON/ID_OFF at that
 * frequency -- confirm against the ethtool_ops documentation.
 */
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_BEACON_ADDR,
						     &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_CTRL_ADDR,
						     &value);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		/* CN68XX beacons autonomously, so manual ON is unsupported. */
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);

		} else if (oct->chip_id == OCTEON_CN68XX) {
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
541
542static void
543lio_ethtool_get_ringparam(struct net_device *netdev,
544 struct ethtool_ringparam *ering)
545{
546 struct lio *lio = GET_LIO(netdev);
547 struct octeon_device *oct = lio->oct_dev;
548 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
549 rx_pending = 0;
550
551 if (OCTEON_CN6XXX(oct)) {
552 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
553
554 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
555 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
556 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
557 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
558 }
559
560 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
561 ering->rx_pending = 0;
562 ering->rx_max_pending = 0;
563 ering->rx_mini_pending = 0;
564 ering->rx_jumbo_pending = rx_pending;
565 ering->rx_mini_max_pending = 0;
566 ering->rx_jumbo_max_pending = rx_max_pending;
567 } else {
568 ering->rx_pending = rx_pending;
569 ering->rx_max_pending = rx_max_pending;
570 ering->rx_mini_pending = 0;
571 ering->rx_jumbo_pending = 0;
572 ering->rx_mini_max_pending = 0;
573 ering->rx_jumbo_max_pending = 0;
574 }
575
576 ering->tx_pending = tx_pending;
577 ering->tx_max_pending = tx_max_pending;
578}
579
580static u32 lio_get_msglevel(struct net_device *netdev)
581{
582 struct lio *lio = GET_LIO(netdev);
583
584 return lio->msg_enable;
585}
586
587static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
588{
589 struct lio *lio = GET_LIO(netdev);
590
591 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
592 if (msglvl & NETIF_MSG_HW)
593 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700594 OCTNET_CMD_VERBOSE_ENABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700595 else
596 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700597 OCTNET_CMD_VERBOSE_DISABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700598 }
599
600 lio->msg_enable = msglvl;
601}
602
603static void
604lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
605{
606 /* Notes: Not supporting any auto negotiation in these
607 * drivers. Just report pause frame support.
608 */
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700609 struct lio *lio = GET_LIO(netdev);
610 struct octeon_device *oct = lio->oct_dev;
611
612 pause->autoneg = 0;
613
614 pause->tx_pause = oct->tx_pause;
615 pause->rx_pause = oct->rx_pause;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700616}
617
/* ethtool_ops::get_ethtool_stats - fill data[] with statistic values.
 * The write order is an ABI contract: it must match oct_stats_strings[]
 * exactly, then oct_iq_stats_strings[] per enabled IQ, then
 * oct_droq_stats_strings[] per enabled OQ.
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	/* Refresh netdev->stats and the firmware-side link_stats before
	 * snapshotting them below.
	 */
	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/*sum of oct->droq[oq_no]->stats->rx_dropped +
	 *oct->droq[oq_no]->stats->dropped_nodispatch +
	 *oct->droq[oq_no]->stats->dropped_toomany +
	 *oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/*data[i++] = CVM_CAST64(stats->multicast); */
	/*data[i++] = CVM_CAST64(stats->collisions); */

	/* firmware tx stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 *fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);

	/* mac tx statistics */
	/*CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/*CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/*CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/*CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/*CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/*CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/* LRO */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_timer
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/* intrmod: packet forward rate */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/*CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/*CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/*CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/*lio->link_changes*/
	data[i++] = CVM_CAST64(lio->link_changes);

	/* TX -- lio_update_stats(lio); */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		/* Only queues enabled in the iq mask are reported. */
		if (!(oct_dev->io_qmask.iq & (1ULL << j)))
			continue;
		/*packets to network port*/
		/*# of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/*# of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/*# of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/*# of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/*XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/*instruction to firmware: data and control */
		/*# of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/*# of instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/*# of instructions could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/*bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/*tso request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/*txq restart*/
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	/* for (j = 0; j < oct_dev->num_oqs; j++) { */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		/* Only queues enabled in the oq mask are reported. */
		if (!(oct_dev->io_qmask.oq & (1ULL << j)))
			continue;

		/*packets send to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
865
866static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
867{
868 struct lio *lio = GET_LIO(netdev);
869 struct octeon_device *oct_dev = lio->oct_dev;
870 int num_iq_stats, num_oq_stats, i, j;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700871 int num_stats;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700872
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700873 switch (stringset) {
874 case ETH_SS_STATS:
875 num_stats = ARRAY_SIZE(oct_stats_strings);
876 for (j = 0; j < num_stats; j++) {
877 sprintf(data, "%s", oct_stats_strings[j]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700878 data += ETH_GSTRING_LEN;
879 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700880
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700881 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
882 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
883 if (!(oct_dev->io_qmask.iq & (1ULL << i)))
884 continue;
885 for (j = 0; j < num_iq_stats; j++) {
886 sprintf(data, "tx-%d-%s", i,
887 oct_iq_stats_strings[j]);
888 data += ETH_GSTRING_LEN;
889 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700890 }
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700891
892 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
893 /* for (i = 0; i < oct_dev->num_oqs; i++) { */
894 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
895 if (!(oct_dev->io_qmask.oq & (1ULL << i)))
896 continue;
897 for (j = 0; j < num_oq_stats; j++) {
898 sprintf(data, "rx-%d-%s", i,
899 oct_droq_stats_strings[j]);
900 data += ETH_GSTRING_LEN;
901 }
902 }
903 break;
904
905 default:
906 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
907 break;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700908 }
909}
910
911static int lio_get_sset_count(struct net_device *netdev, int sset)
912{
913 struct lio *lio = GET_LIO(netdev);
914 struct octeon_device *oct_dev = lio->oct_dev;
915
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700916 switch (sset) {
917 case ETH_SS_STATS:
918 return (ARRAY_SIZE(oct_stats_strings) +
919 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
920 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
921 default:
922 return -EOPNOTSUPP;
923 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700924}
925
926static int lio_get_intr_coalesce(struct net_device *netdev,
927 struct ethtool_coalesce *intr_coal)
928{
929 struct lio *lio = GET_LIO(netdev);
930 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700931 struct octeon_instr_queue *iq;
932 struct oct_intrmod_cfg *intrmod_cfg;
933
934 intrmod_cfg = &oct->intrmod;
935
936 switch (oct->chip_id) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700937 case OCTEON_CN68XX:
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -0700938 case OCTEON_CN66XX: {
939 struct octeon_cn6xxx *cn6xxx =
940 (struct octeon_cn6xxx *)oct->chip;
941
942 if (!intrmod_cfg->rx_enable) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700943 intr_coal->rx_coalesce_usecs =
944 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
945 intr_coal->rx_max_coalesced_frames =
946 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700947 }
948
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -0700949 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700950 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
951 break;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -0700952 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700953 default:
954 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
955 return -EINVAL;
956 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -0700957 if (intrmod_cfg->rx_enable) {
958 intr_coal->use_adaptive_rx_coalesce =
959 intrmod_cfg->rx_enable;
960 intr_coal->rate_sample_interval =
961 intrmod_cfg->check_intrvl;
962 intr_coal->pkt_rate_high =
963 intrmod_cfg->maxpkt_ratethr;
964 intr_coal->pkt_rate_low =
965 intrmod_cfg->minpkt_ratethr;
966 intr_coal->rx_max_coalesced_frames_high =
967 intrmod_cfg->rx_maxcnt_trigger;
968 intr_coal->rx_coalesce_usecs_high =
969 intrmod_cfg->rx_maxtmr_trigger;
970 intr_coal->rx_coalesce_usecs_low =
971 intrmod_cfg->rx_mintmr_trigger;
972 intr_coal->rx_max_coalesced_frames_low =
973 intrmod_cfg->rx_mincnt_trigger;
974 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700975 return 0;
976}
977
978/* Callback function for intrmod */
979static void octnet_intrmod_callback(struct octeon_device *oct_dev,
980 u32 status,
981 void *ptr)
982{
983 struct oct_intrmod_cmd *cmd = ptr;
984 struct octeon_soft_command *sc = cmd->sc;
985
986 oct_dev = cmd->oct_dev;
987
988 if (status)
989 dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
990 CVM_CAST64(status));
991 else
992 dev_info(&oct_dev->pci_dev->dev,
993 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -0700994 oct_dev->intrmod.rx_enable);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700995
996 octeon_free_soft_command(oct_dev, sc);
997}
998
999/* Configure interrupt moderation parameters */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001000static int octnet_set_intrmod_cfg(struct lio *lio,
1001 struct oct_intrmod_cfg *intr_cfg)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001002{
1003 struct octeon_soft_command *sc;
1004 struct oct_intrmod_cmd *cmd;
1005 struct oct_intrmod_cfg *cfg;
1006 int retval;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001007 struct octeon_device *oct_dev = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001008
1009 /* Alloc soft command */
1010 sc = (struct octeon_soft_command *)
1011 octeon_alloc_soft_command(oct_dev,
1012 sizeof(struct oct_intrmod_cfg),
1013 0,
1014 sizeof(struct oct_intrmod_cmd));
1015
1016 if (!sc)
1017 return -ENOMEM;
1018
1019 cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
1020 cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
1021
1022 memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
1023 octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
1024 cmd->sc = sc;
1025 cmd->cfg = cfg;
1026 cmd->oct_dev = oct_dev;
1027
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001028 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1029
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001030 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1031 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
1032
1033 sc->callback = octnet_intrmod_callback;
1034 sc->callback_arg = cmd;
1035 sc->wait_time = 1000;
1036
1037 retval = octeon_send_soft_command(oct_dev, sc);
Raghu Vatsavayiddc173a2016-06-14 16:54:43 -07001038 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001039 octeon_free_soft_command(oct_dev, sc);
1040 return -EINVAL;
1041 }
1042
1043 return 0;
1044}
1045
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001046void
1047octnet_nic_stats_callback(struct octeon_device *oct_dev,
1048 u32 status, void *ptr)
1049{
1050 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1051 struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
1052 sc->virtrptr;
1053 struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
1054 sc->ctxptr;
1055 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1056 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1057
1058 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1059 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1060
1061 if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1062 octeon_swap_8B_data((u64 *)&resp->stats,
1063 (sizeof(struct oct_link_stats)) >> 3);
1064
1065 /* RX link-level stats */
1066 rstats->total_rcvd = rsp_rstats->total_rcvd;
1067 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1068 rstats->total_bcst = rsp_rstats->total_bcst;
1069 rstats->total_mcst = rsp_rstats->total_mcst;
1070 rstats->runts = rsp_rstats->runts;
1071 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1072 /* Accounts for over/under-run of buffers */
1073 rstats->fifo_err = rsp_rstats->fifo_err;
1074 rstats->dmac_drop = rsp_rstats->dmac_drop;
1075 rstats->fcs_err = rsp_rstats->fcs_err;
1076 rstats->jabber_err = rsp_rstats->jabber_err;
1077 rstats->l2_err = rsp_rstats->l2_err;
1078 rstats->frame_err = rsp_rstats->frame_err;
1079
1080 /* RX firmware stats */
1081 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1082 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1083 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1084 rstats->fw_err_link = rsp_rstats->fw_err_link;
1085 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1086 /* Number of packets that are LROed */
1087 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1088 /* Number of octets that are LROed */
1089 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1090 /* Number of LRO packets formed */
1091 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1092 /* Number of times lRO of packet aborted */
1093 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1094 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1095 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1096 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1097 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1098 /* intrmod: packet forward rate */
1099 rstats->fwd_rate = rsp_rstats->fwd_rate;
1100
1101 /* TX link-level stats */
1102 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1103 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1104 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1105 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1106 tstats->ctl_sent = rsp_tstats->ctl_sent;
1107 /* Packets sent after one collision*/
1108 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1109 /* Packets sent after multiple collision*/
1110 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1111 /* Packets not sent due to max collisions */
1112 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1113 /* Packets not sent due to max deferrals */
1114 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1115 /* Accounts for over/under-run of buffers */
1116 tstats->fifo_err = rsp_tstats->fifo_err;
1117 tstats->runts = rsp_tstats->runts;
1118 /* Total number of collisions detected */
1119 tstats->total_collisions = rsp_tstats->total_collisions;
1120
1121 /* firmware stats */
1122 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1123 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1124 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1125 tstats->fw_err_link = rsp_tstats->fw_err_link;
1126 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1127 tstats->fw_tso = rsp_tstats->fw_tso;
1128 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1129 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1130 resp->status = 1;
1131 } else {
1132 resp->status = -1;
1133 }
1134 complete(&ctrl->complete);
1135}
1136
/* Fetch link-level and firmware statistics from the device into
 * oct_dev->link_stats via an OPCODE_NIC_PORT_STATS soft command.
 * (The previous comment here, "Configure interrupt moderation
 * parameters", was a stale copy-paste from octnet_set_intrmod_cfg.)
 * Blocks up to 1 s for the completion signalled by
 * octnet_nic_stats_callback(); returns 0 on success, negative errno
 * otherwise.
 */
static int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500; /*in milli seconds*/

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* NOTE(review): if this wait times out before the callback runs,
	 * sc is freed below while the callback may still fire later and
	 * touch it — confirm the response-manager guarantees the callback
	 * has completed (sc->wait_time is 500 ms vs. the 1000 ms wait here).
	 */
	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));

	/* resp->status is 1 only when the callback copied the stats. */
	if (resp->status != 1) {
		octeon_free_soft_command(oct_dev, sc);

		return -EINVAL;
	}

	octeon_free_soft_command(oct_dev, sc);

	return 0;
}
1194
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001195/* Enable/Disable auto interrupt Moderation */
1196static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001197 *intr_coal)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001198{
1199 int ret = 0;
1200 struct octeon_device *oct = lio->oct_dev;
1201 struct oct_intrmod_cfg *intrmod_cfg;
1202
1203 intrmod_cfg = &oct->intrmod;
1204
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001205 if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001206 if (intr_coal->rate_sample_interval)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001207 intrmod_cfg->check_intrvl =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001208 intr_coal->rate_sample_interval;
1209 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001210 intrmod_cfg->check_intrvl =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001211 LIO_INTRMOD_CHECK_INTERVAL;
1212
1213 if (intr_coal->pkt_rate_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001214 intrmod_cfg->maxpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001215 intr_coal->pkt_rate_high;
1216 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001217 intrmod_cfg->maxpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001218 LIO_INTRMOD_MAXPKT_RATETHR;
1219
1220 if (intr_coal->pkt_rate_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001221 intrmod_cfg->minpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001222 intr_coal->pkt_rate_low;
1223 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001224 intrmod_cfg->minpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001225 LIO_INTRMOD_MINPKT_RATETHR;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001226 }
1227 if (oct->intrmod.rx_enable) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001228 if (intr_coal->rx_max_coalesced_frames_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001229 intrmod_cfg->rx_maxcnt_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001230 intr_coal->rx_max_coalesced_frames_high;
1231 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001232 intrmod_cfg->rx_maxcnt_trigger =
1233 LIO_INTRMOD_RXMAXCNT_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001234
1235 if (intr_coal->rx_coalesce_usecs_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001236 intrmod_cfg->rx_maxtmr_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001237 intr_coal->rx_coalesce_usecs_high;
1238 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001239 intrmod_cfg->rx_maxtmr_trigger =
1240 LIO_INTRMOD_RXMAXTMR_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001241
1242 if (intr_coal->rx_coalesce_usecs_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001243 intrmod_cfg->rx_mintmr_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001244 intr_coal->rx_coalesce_usecs_low;
1245 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001246 intrmod_cfg->rx_mintmr_trigger =
1247 LIO_INTRMOD_RXMINTMR_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001248
1249 if (intr_coal->rx_max_coalesced_frames_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001250 intrmod_cfg->rx_mincnt_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001251 intr_coal->rx_max_coalesced_frames_low;
1252 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001253 intrmod_cfg->rx_mincnt_trigger =
1254 LIO_INTRMOD_RXMINCNT_TRIGGER;
1255 }
1256 if (oct->intrmod.tx_enable) {
1257 if (intr_coal->tx_max_coalesced_frames_high)
1258 intrmod_cfg->tx_maxcnt_trigger =
1259 intr_coal->tx_max_coalesced_frames_high;
1260 else
1261 intrmod_cfg->tx_maxcnt_trigger =
1262 LIO_INTRMOD_TXMAXCNT_TRIGGER;
1263 if (intr_coal->tx_max_coalesced_frames_low)
1264 intrmod_cfg->tx_mincnt_trigger =
1265 intr_coal->tx_max_coalesced_frames_low;
1266 else
1267 intrmod_cfg->tx_mincnt_trigger =
1268 LIO_INTRMOD_TXMINCNT_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001269 }
1270
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001271 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001272
1273 return ret;
1274}
1275
1276static int
1277oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
1278{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001279 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001280 u32 rx_max_coalesced_frames;
1281
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001282 /* Config Cnt based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001283 switch (oct->chip_id) {
1284 case OCTEON_CN68XX:
1285 case OCTEON_CN66XX: {
1286 struct octeon_cn6xxx *cn6xxx =
1287 (struct octeon_cn6xxx *)oct->chip;
1288
1289 if (!intr_coal->rx_max_coalesced_frames)
1290 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1291 else
1292 rx_max_coalesced_frames =
1293 intr_coal->rx_max_coalesced_frames;
1294 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1295 rx_max_coalesced_frames);
1296 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1297 break;
1298 }
1299 default:
1300 return -EINVAL;
1301 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001302 return 0;
1303}
1304
1305static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
1306 *intr_coal)
1307{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001308 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001309 u32 time_threshold, rx_coalesce_usecs;
1310
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001311 /* Config Time based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001312 switch (oct->chip_id) {
1313 case OCTEON_CN68XX:
1314 case OCTEON_CN66XX: {
1315 struct octeon_cn6xxx *cn6xxx =
1316 (struct octeon_cn6xxx *)oct->chip;
1317 if (!intr_coal->rx_coalesce_usecs)
1318 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1319 else
1320 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001321
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001322 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1323 rx_coalesce_usecs);
1324 octeon_write_csr(oct,
1325 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1326 time_threshold);
1327
1328 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1329 break;
1330 }
1331 default:
1332 return -EINVAL;
1333 }
1334
1335 return 0;
1336}
1337
1338static int
1339oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
1340 __attribute__((unused)))
1341{
1342 struct octeon_device *oct = lio->oct_dev;
1343
1344 /* Config Cnt based interrupt values */
1345 switch (oct->chip_id) {
1346 case OCTEON_CN68XX:
1347 case OCTEON_CN66XX:
1348 break;
1349 default:
1350 return -EINVAL;
1351 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001352 return 0;
1353}
1354
1355static int lio_set_intr_coalesce(struct net_device *netdev,
1356 struct ethtool_coalesce *intr_coal)
1357{
1358 struct lio *lio = GET_LIO(netdev);
1359 int ret;
1360 struct octeon_device *oct = lio->oct_dev;
1361 u32 j, q_no;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001362 int db_max, db_min;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001363
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001364 switch (oct->chip_id) {
1365 case OCTEON_CN68XX:
1366 case OCTEON_CN66XX:
1367 db_min = CN6XXX_DB_MIN;
1368 db_max = CN6XXX_DB_MAX;
1369 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1370 (intr_coal->tx_max_coalesced_frames <= db_max)) {
1371 for (j = 0; j < lio->linfo.num_txpciq; j++) {
1372 q_no = lio->linfo.txpciq[j].s.q_no;
1373 oct->instr_queue[q_no]->fill_threshold =
1374 intr_coal->tx_max_coalesced_frames;
1375 }
1376 } else {
1377 dev_err(&oct->pci_dev->dev,
1378 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1379 intr_coal->tx_max_coalesced_frames, db_min,
1380 db_max);
1381 return -EINVAL;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001382 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001383 break;
1384 default:
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001385 return -EINVAL;
1386 }
1387
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001388 oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1389 oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001390
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001391 ret = oct_cfg_adaptive_intr(lio, intr_coal);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001392
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001393 if (!intr_coal->use_adaptive_rx_coalesce) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001394 ret = oct_cfg_rx_intrtime(lio, intr_coal);
1395 if (ret)
1396 goto ret_intrmod;
1397
1398 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
1399 if (ret)
1400 goto ret_intrmod;
1401 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001402 if (!intr_coal->use_adaptive_tx_coalesce) {
1403 ret = oct_cfg_tx_intrcnt(lio, intr_coal);
1404 if (ret)
1405 goto ret_intrmod;
1406 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001407
1408 return 0;
1409ret_intrmod:
1410 return ret;
1411}
1412
/* ethtool get_ts_info handler: report timestamping capabilities.
 * Hardware timestamping bits are advertised only when the driver is
 * built with PTP_HARDWARE_TIMESTAMPING; software RX timestamping is
 * always available.
 */
static int lio_get_ts_info(struct net_device *netdev,
			   struct ethtool_ts_info *info)
{
	struct lio *lio = GET_LIO(netdev);

	info->so_timestamping =
#ifdef PTP_HARDWARE_TIMESTAMPING
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
#endif
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;

	/* Expose the PHC index only if a PTP clock was registered. */
	if (lio->ptp_clock)
		info->phc_index = ptp_clock_index(lio->ptp_clock);
	else
		info->phc_index = -1;

#ifdef PTP_HARDWARE_TIMESTAMPING
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
#endif

	return 0;
}
1444
/* ethtool set_settings handler: validate the requested autoneg/speed/
 * duplex combination and forward it to the firmware via a NIC control
 * packet (OCTNET_CMD_SET_SETTINGS).  Fixed-speed interface modes
 * (XAUI/RXAUI/XFI) reject any change.
 */
static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	/* get the link info */
	linfo = &lio->linfo;

	if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* With autoneg off, only 10/100 at half/full duplex are accepted. */
	if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
						  ecmd->speed != SPEED_10) ||
						 (ecmd->duplex != DUPLEX_HALF &&
						  ecmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
	 * as they operate at fixed Speed and Duplex settings
	 */
	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
		dev_info(&oct->pci_dev->dev,
			 "Autonegotiation, duplex and speed settings cannot be modified.\n");
		return -EINVAL;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 1000;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
	 * to SE core application using ncmd.s.more & ncmd.s.param
	 */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg ON */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
				    OCTNIC_NCMD_AUTONEG_ON;
		nctrl.ncmd.s.param1 = ecmd->advertising;
	} else {
		/* Autoneg OFF */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;

		nctrl.ncmd.s.param2 = ecmd->duplex;

		nctrl.ncmd.s.param1 = ecmd->speed;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
		/* NOTE(review): -1 reaches userspace as -EPERM; a specific
		 * errno (or propagating ret) would be clearer — confirm
		 * before changing, callers may depend on it.
		 */
		return -1;
	}

	return 0;
}
1510
1511static int lio_nway_reset(struct net_device *netdev)
1512{
1513 if (netif_running(netdev)) {
1514 struct ethtool_cmd ecmd;
1515
1516 memset(&ecmd, 0, sizeof(struct ethtool_cmd));
1517 ecmd.autoneg = 0;
1518 ecmd.speed = 0;
1519 ecmd.duplex = 0;
1520 lio_set_settings(netdev, &ecmd);
1521 }
1522 return 0;
1523}
1524
/* Return register dump len. */
static int lio_get_regs_len(struct net_device *dev)
{
	/* Fixed-size buffer filled by lio_get_regs(). */
	return OCT_ETHTOOL_REGDUMP_LEN;
}
1530
1531static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
1532{
1533 u32 reg;
1534 int i, len = 0;
1535
1536 /* PCI Window Registers */
1537
1538 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
1539 reg = CN6XXX_WIN_WR_ADDR_LO;
1540 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
1541 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
1542 reg = CN6XXX_WIN_WR_ADDR_HI;
1543 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
1544 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
1545 reg = CN6XXX_WIN_RD_ADDR_LO;
1546 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
1547 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
1548 reg = CN6XXX_WIN_RD_ADDR_HI;
1549 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
1550 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
1551 reg = CN6XXX_WIN_WR_DATA_LO;
1552 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
1553 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
1554 reg = CN6XXX_WIN_WR_DATA_HI;
1555 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
1556 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
1557 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
1558 CN6XXX_WIN_WR_MASK_REG,
1559 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
1560
1561 /* PCI Interrupt Register */
1562 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
1563 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
1564 CN6XXX_SLI_INT_ENB64_PORT0));
1565 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
1566 CN6XXX_SLI_INT_ENB64_PORT1,
1567 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
1568 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
1569 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
1570
1571 /* PCI Output queue registers */
1572 for (i = 0; i < oct->num_oqs; i++) {
1573 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
1574 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
1575 reg, i, octeon_read_csr(oct, reg));
1576 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
1577 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
1578 reg, i, octeon_read_csr(oct, reg));
1579 }
1580 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
1581 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
1582 reg, octeon_read_csr(oct, reg));
1583 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
1584 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
1585 reg, octeon_read_csr(oct, reg));
1586
1587 /* PCI Input queue registers */
1588 for (i = 0; i <= 3; i++) {
1589 u32 reg;
1590
1591 reg = CN6XXX_SLI_IQ_DOORBELL(i);
1592 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
1593 reg, i, octeon_read_csr(oct, reg));
1594 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
1595 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
1596 reg, i, octeon_read_csr(oct, reg));
1597 }
1598
1599 /* PCI DMA registers */
1600
1601 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
1602 CN6XXX_DMA_CNT(0),
1603 octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
1604 reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
1605 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
1606 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
1607 reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
1608 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
1609 CN6XXX_DMA_TIME_INT_LEVEL(0),
1610 octeon_read_csr(oct, reg));
1611
1612 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
1613 CN6XXX_DMA_CNT(1),
1614 octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
1615 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1616 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
1617 CN6XXX_DMA_PKT_INT_LEVEL(1),
1618 octeon_read_csr(oct, reg));
1619 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1620 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
1621 CN6XXX_DMA_TIME_INT_LEVEL(1),
1622 octeon_read_csr(oct, reg));
1623
1624 /* PCI Index registers */
1625
1626 len += sprintf(s + len, "\n");
1627
1628 for (i = 0; i < 16; i++) {
1629 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
1630 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
1631 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
1632 }
1633
1634 return len;
1635}
1636
1637static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
1638{
1639 u32 val;
1640 int i, len = 0;
1641
1642 /* PCI CONFIG Registers */
1643
1644 len += sprintf(s + len,
1645 "\n\t Octeon Config space Registers\n\n");
1646
1647 for (i = 0; i <= 13; i++) {
1648 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1649 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1650 (i * 4), i, val);
1651 }
1652
1653 for (i = 30; i <= 34; i++) {
1654 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1655 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1656 (i * 4), i, val);
1657 }
1658
1659 return len;
1660}
1661
1662/* Return register dump user app. */
1663static void lio_get_regs(struct net_device *dev,
1664 struct ethtool_regs *regs, void *regbuf)
1665{
1666 struct lio *lio = GET_LIO(dev);
1667 int len = 0;
1668 struct octeon_device *oct = lio->oct_dev;
1669
1670 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
1671 regs->version = OCT_ETHTOOL_REGSVER;
1672
1673 switch (oct->chip_id) {
1674 /* case OCTEON_CN73XX: Todo */
1675 case OCTEON_CN68XX:
1676 case OCTEON_CN66XX:
1677 len += cn6xxx_read_csr_reg(regbuf + len, oct);
1678 len += cn6xxx_read_config_reg(regbuf + len, oct);
1679 break;
1680 default:
1681 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
1682 __func__, oct->chip_id);
1683 }
1684}
1685
Raghu Vatsavayif5a20472016-06-21 22:53:14 -07001686static u32 lio_get_priv_flags(struct net_device *netdev)
1687{
1688 struct lio *lio = GET_LIO(netdev);
1689
1690 return lio->oct_dev->priv_flags;
1691}
1692
1693static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
1694{
1695 struct lio *lio = GET_LIO(netdev);
1696 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
1697
1698 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
1699 intr_by_tx_bytes);
1700 return 0;
1701}
1702
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001703static const struct ethtool_ops lio_ethtool_ops = {
1704 .get_settings = lio_get_settings,
1705 .get_link = ethtool_op_get_link,
1706 .get_drvinfo = lio_get_drvinfo,
1707 .get_ringparam = lio_ethtool_get_ringparam,
1708 .get_channels = lio_ethtool_get_channels,
1709 .set_phys_id = lio_set_phys_id,
1710 .get_eeprom_len = lio_get_eeprom_len,
1711 .get_eeprom = lio_get_eeprom,
1712 .get_strings = lio_get_strings,
1713 .get_ethtool_stats = lio_get_ethtool_stats,
1714 .get_pauseparam = lio_get_pauseparam,
1715 .get_regs_len = lio_get_regs_len,
1716 .get_regs = lio_get_regs,
1717 .get_msglevel = lio_get_msglevel,
1718 .set_msglevel = lio_set_msglevel,
1719 .get_sset_count = lio_get_sset_count,
1720 .nway_reset = lio_nway_reset,
1721 .set_settings = lio_set_settings,
1722 .get_coalesce = lio_get_intr_coalesce,
1723 .set_coalesce = lio_set_intr_coalesce,
Raghu Vatsavayif5a20472016-06-21 22:53:14 -07001724 .get_priv_flags = lio_get_priv_flags,
1725 .set_priv_flags = lio_set_priv_flags,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001726 .get_ts_info = lio_get_ts_info,
1727};
1728
/* Attach this driver's ethtool operations to a net_device. */
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &lio_ethtool_ops;
}