blob: 2b030957f492e22506ee52be3c57393d75b79150 [file] [log] [blame]
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001/**********************************************************************
2* Author: Cavium, Inc.
3*
4* Contact: support@cavium.com
5* Please include "LiquidIO" in the subject.
6*
7* Copyright (c) 2003-2015 Cavium, Inc.
8*
9* This file is free software; you can redistribute it and/or modify
10* it under the terms of the GNU General Public License, Version 2, as
11* published by the Free Software Foundation.
12*
13* This file is distributed in the hope that it will be useful, but
14* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16* NONINFRINGEMENT. See the GNU General Public License for more
17* details.
18*
19* This file may also be available under a different license from Cavium.
20* Contact Cavium, Inc. for more information
21**********************************************************************/
22#include <linux/version.h>
23#include <linux/netdevice.h>
24#include <linux/net_tstamp.h>
25#include <linux/ethtool.h>
26#include <linux/dma-mapping.h>
27#include <linux/pci.h>
28#include "octeon_config.h"
29#include "liquidio_common.h"
30#include "octeon_droq.h"
31#include "octeon_iq.h"
32#include "response_manager.h"
33#include "octeon_device.h"
34#include "octeon_nic.h"
35#include "octeon_main.h"
36#include "octeon_network.h"
37#include "cn66xx_regs.h"
38#include "cn66xx_device.h"
39#include "cn68xx_regs.h"
40#include "cn68xx_device.h"
41#include "liquidio_image.h"
42
/* Pull the latest per-port link statistics from firmware into
 * oct->link_stats; static, so the definition lives later in this file
 * (consumed by lio_get_ethtool_stats()).
 */
static int octnet_get_link_stats(struct net_device *netdev);
44
/* Per-request context for an MDIO45 soft command; allocated inside the
 * soft-command buffer so the completion callback can wake the issuer.
 */
struct oct_mdio_cmd_context {
	int octeon_id;		/* device id; callback re-resolves the octeon_device from it */
	wait_queue_head_t wc;	/* issuer sleeps here until the response arrives */
	int cond;		/* 0 = pending, 1 = success, -1 = failure (set by callback) */
};
50
/* Response layout returned by firmware for an MDIO45 soft command. */
struct oct_mdio_cmd_resp {
	u64 rh;				/* response header */
	struct oct_mdio_cmd resp;	/* echoed command, carries read value1 */
	u64 status;			/* 0 on success, non-zero firmware error */
};
56
/* Response-buffer size handed to octeon_alloc_soft_command() for MDIO ops */
#define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
58
/* Octeon's interface mode of operation; reported by firmware in
 * linfo->link.s.interface.  Only XAUI and RXAUI are handled by
 * lio_get_settings() below.
 */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
};
77
/* NOTE(review): duplicates the kernel's ARRAY_SIZE(), which this file
 * already uses elsewhere — candidate for removal if unused.
 */
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096	/* buffer size for ethtool register dumps */
#define OCT_ETHTOOL_REGSVER 1		/* register-dump format version */
81
/* statistics of PF
 * NOTE: the order of these strings must match exactly the order in which
 * lio_get_ethtool_stats() writes values into data[].
 */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/*jabber_err+l2_err+frame_err */
	"tx_errors",	/*fw_err_pko+fw_err_link+fw_err_drop */
	"rx_dropped",	/*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
			 *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/*oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collison",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferal_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};
150
/* statistics of host tx queue
 * NOTE: order must match the per-IQ section of lio_get_ethtool_stats().
 */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->instr_queue[iq_no]->stats.tx_done*/
	"bytes",		/*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"txq_restart",
};
167
/* statistics of host rx queue
 * NOTE: order must match the per-DROQ section of lio_get_ethtool_stats().
 */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",		/*oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",		/*oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",		/*oct->droq[oq_no]->stats.rx_dropped+
				 *oct->droq[oq_no]->stats.dropped_nodispatch+
				 *oct->droq[oq_no]->stats.dropped_toomany+
				 *oct->droq[oq_no]->stats.dropped_nomem
				 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"buffer_alloc_failure",
};
186
/* Bit flags for NIC commands */
#define OCTNIC_NCMD_AUTONEG_ON  0x1
#define OCTNIC_NCMD_PHY_ON      0x2
189
/* ethtool get_settings handler: report link mode, speed and duplex as
 * last advertised by firmware in lio->linfo.  Only XAUI/RXAUI interfaces
 * are recognized; other modes log an error but still return 0 with the
 * speed/duplex fields filled from the cached link state.
 */
static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;

	linfo = &lio->linfo;

	if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
	    linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
		/* 10G fibre, no autonegotiation, external transceiver */
		ecmd->port = PORT_FIBRE;
		ecmd->supported =
			(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
			 SUPPORTED_Pause);
		ecmd->advertising =
			(ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
		ecmd->transceiver = XCVR_EXTERNAL;
		ecmd->autoneg = AUTONEG_DISABLE;

	} else {
		/* NOTE(review): ecmd->port/supported/advertising are left
		 * untouched in this branch — presumably zeroed by the
		 * ethtool core before the call; verify against caller.
		 */
		dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
	}

	if (linfo->link.s.link_up) {
		ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
		ecmd->duplex = linfo->link.s.duplex;
	} else {
		/* link down: speed/duplex are meaningless */
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
223
224static void
225lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
226{
227 struct lio *lio;
228 struct octeon_device *oct;
229
230 lio = GET_LIO(netdev);
231 oct = lio->oct_dev;
232
233 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
234 strcpy(drvinfo->driver, "liquidio");
235 strcpy(drvinfo->version, LIQUIDIO_VERSION);
236 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
237 ETHTOOL_FWVERS_LEN);
238 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700239}
240
241static void
242lio_ethtool_get_channels(struct net_device *dev,
243 struct ethtool_channels *channel)
244{
245 struct lio *lio = GET_LIO(dev);
246 struct octeon_device *oct = lio->oct_dev;
247 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
248
249 if (OCTEON_CN6XXX(oct)) {
250 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
251
252 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
253 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
254 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
255 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
256 }
257
258 channel->max_rx = max_rx;
259 channel->max_tx = max_tx;
260 channel->rx_count = rx_count;
261 channel->tx_count = tx_count;
262}
263
264static int lio_get_eeprom_len(struct net_device *netdev)
265{
266 u8 buf[128];
267 struct lio *lio = GET_LIO(netdev);
268 struct octeon_device *oct_dev = lio->oct_dev;
269 struct octeon_board_info *board_info;
270 int len;
271
272 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
273 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
274 board_info->name, board_info->serial_number,
275 board_info->major, board_info->minor);
276
277 return len;
278}
279
280static int
281lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
282 u8 *bytes)
283{
284 struct lio *lio = GET_LIO(netdev);
285 struct octeon_device *oct_dev = lio->oct_dev;
286 struct octeon_board_info *board_info;
287 int len;
288
289 if (eeprom->offset != 0)
290 return -EINVAL;
291
292 eeprom->magic = oct_dev->pci_dev->vendor;
293 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
294 len =
295 sprintf((char *)bytes,
296 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
297 board_info->name, board_info->serial_number,
298 board_info->major, board_info->minor);
299
300 return 0;
301}
302
303static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
304{
305 struct lio *lio = GET_LIO(netdev);
306 struct octeon_device *oct = lio->oct_dev;
307 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700308 int ret = 0;
309
310 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
311
312 nctrl.ncmd.u64 = 0;
313 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700314 nctrl.ncmd.s.param1 = addr;
315 nctrl.ncmd.s.param2 = val;
316 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700317 nctrl.wait_time = 100;
318 nctrl.netpndev = (u64)netdev;
319 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
320
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700321 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700322 if (ret < 0) {
323 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
324 return -EINVAL;
325 }
326
327 return 0;
328}
329
330/* Callback for when mdio command response arrives
331 */
332static void octnet_mdio_resp_callback(struct octeon_device *oct,
333 u32 status,
334 void *buf)
335{
336 struct oct_mdio_cmd_resp *mdio_cmd_rsp;
337 struct oct_mdio_cmd_context *mdio_cmd_ctx;
338 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
339
340 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
341 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
342
343 oct = lio_get_device(mdio_cmd_ctx->octeon_id);
344 if (status) {
345 dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
346 CVM_CAST64(status));
347 ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
348 } else {
349 ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
350 }
351 wake_up_interruptible(&mdio_cmd_ctx->wc);
352}
353
/* This routine provides PHY access routines for
 * mdio clause45 .
 *
 * @lio:   interface private data
 * @op:    non-zero = write *value to register 'loc'; zero = read the
 *         register into *value
 * @loc:   MDIO register address
 * @value: in/out data, depending on @op
 *
 * Builds a soft command carrying the MDIO request, sends it to firmware,
 * and sleeps on the context wait queue until octnet_mdio_resp_callback()
 * signals completion via mdio_cmd_ctx->cond.
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY if the
 * command could not be sent or firmware reported an error, -EINVAL if
 * the callback flagged a failed response.
 */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	/* Command, response and context buffers are all carved out of the
	 * soft command allocation.
	 */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	/* cond must be cleared before the command is posted: the callback
	 * sets it to +/-1 and sleep_cond() below tests it.
	 */
	ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	/* firmware expects the command in swapped byte order */
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			/* un-swap the response before reading value1 */
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}
432
/* ethtool set_phys_id handler: drive the port identification LED.
 *
 * CN66XX parts toggle the LED through PHY GPIO writes; CN68XX parts use
 * MDIO beacon registers, saving the original LED settings on ACTIVATE
 * and restoring them (in reverse order) on INACTIVE.  The ordering of
 * the save/restore MDIO accesses is significant.
 *
 * Returns 2 from ETHTOOL_ID_ACTIVE on CN66XX to request a 2 Hz blink
 * from the ethtool core; 0 on success otherwise; negative errno on
 * failure or unsupported chip/state combinations.
 */
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_BEACON_ADDR,
						     &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret =
				octnet_mdio45_access(lio, 1,
						     LIO68XX_LED_CTRL_ADDR,
						     &value);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* CN68XX beacon blinks autonomously; manual ON/OFF
			 * is not supported.
			 */
			return -EINVAL;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else if (oct->chip_id == OCTEON_CN68XX)
			return -EINVAL;
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
534
535static void
536lio_ethtool_get_ringparam(struct net_device *netdev,
537 struct ethtool_ringparam *ering)
538{
539 struct lio *lio = GET_LIO(netdev);
540 struct octeon_device *oct = lio->oct_dev;
541 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
542 rx_pending = 0;
543
544 if (OCTEON_CN6XXX(oct)) {
545 struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
546
547 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
548 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
549 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
550 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
551 }
552
553 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
554 ering->rx_pending = 0;
555 ering->rx_max_pending = 0;
556 ering->rx_mini_pending = 0;
557 ering->rx_jumbo_pending = rx_pending;
558 ering->rx_mini_max_pending = 0;
559 ering->rx_jumbo_max_pending = rx_max_pending;
560 } else {
561 ering->rx_pending = rx_pending;
562 ering->rx_max_pending = rx_max_pending;
563 ering->rx_mini_pending = 0;
564 ering->rx_jumbo_pending = 0;
565 ering->rx_mini_max_pending = 0;
566 ering->rx_jumbo_max_pending = 0;
567 }
568
569 ering->tx_pending = tx_pending;
570 ering->tx_max_pending = tx_max_pending;
571}
572
573static u32 lio_get_msglevel(struct net_device *netdev)
574{
575 struct lio *lio = GET_LIO(netdev);
576
577 return lio->msg_enable;
578}
579
580static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
581{
582 struct lio *lio = GET_LIO(netdev);
583
584 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
585 if (msglvl & NETIF_MSG_HW)
586 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700587 OCTNET_CMD_VERBOSE_ENABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700588 else
589 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700590 OCTNET_CMD_VERBOSE_DISABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700591 }
592
593 lio->msg_enable = msglvl;
594}
595
596static void
597lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
598{
599 /* Notes: Not supporting any auto negotiation in these
600 * drivers. Just report pause frame support.
601 */
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700602 struct lio *lio = GET_LIO(netdev);
603 struct octeon_device *oct = lio->oct_dev;
604
605 pause->autoneg = 0;
606
607 pause->tx_pause = oct->tx_pause;
608 pause->rx_pause = oct->rx_pause;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700609}
610
/* ethtool get_stats handler: fill data[] positionally.
 *
 * The write order here is a hard contract with the string tables above:
 * first every entry of oct_stats_strings, then oct_iq_stats_strings for
 * each active instruction queue, then oct_droq_stats_strings for each
 * active output queue.  The 'stats' parameter is unused; counters come
 * from netdev->stats and oct_dev->link_stats (refreshed from firmware
 * by octnet_get_link_stats()).
 */
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	/* refresh netdev->stats and oct_dev->link_stats before reading */
	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/*sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/*sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/*sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/*sum of oct->droq[oq_no]->stats->rx_dropped +
	 *oct->droq[oq_no]->stats->dropped_nodispatch +
	 *oct->droq[oq_no]->stats->dropped_toomany +
	 *oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/*data[i++] = CVM_CAST64(stats->multicast); */
	/*data[i++] = CVM_CAST64(stats->collisions); */

	/* firmware tx stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 *fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 *fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);

	/* mac tx statistics */
	/*CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/*CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/*CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/*CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/*CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/*CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/*CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/*CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 *fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/* LRO */
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 *fw_lro_aborts_timer
	 */
	/* intrmod: packet forward rate */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/*CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/*CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/*CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/*CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/*CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/*wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/*lio->link_changes*/
	data[i++] = CVM_CAST64(lio->link_changes);

	/* TX -- lio_update_stats(lio); */
	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		/* skip queues not owned by this interface */
		if (!(oct_dev->io_qmask.iq & (1ULL << j)))
			continue;
		/*packets to network port*/
		/*# of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/*# of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/*# of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/*# of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/*XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/*instruction to firmware: data and control */
		/*# of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/*# of instructions processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_processed);
		/*# of instructions could not be processed */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
				       stats.instr_dropped);
		/*bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/*tso request*/
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/*txq restart*/
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	/* for (j = 0; j < oct_dev->num_oqs; j++) { */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		/* skip queues not owned by this interface */
		if (!(oct_dev->io_qmask.oq & (1ULL << j)))
			continue;

		/*packets send to TCP/IP network stack */
		/*# of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/*# of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/*# of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/*control and data path*/
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}
858
/* ethtool get_strings handler: emit the stat names for ETH_SS_STATS.
 * Ordering and per-queue repetition must mirror lio_get_ethtool_stats():
 * device-wide names first, then "tx-<q>-..." for each active IQ, then
 * "rx-<q>-..." for each active OQ.  Each name occupies exactly
 * ETH_GSTRING_LEN bytes of 'data' (the source arrays are themselves
 * bounded by ETH_GSTRING_LEN, so the sprintf cannot overrun a slot).
 */
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			/* only queues active for this interface */
			if (!(oct_dev->io_qmask.iq & (1ULL << i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		/* for (i = 0; i < oct_dev->num_oqs; i++) { */
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & (1ULL << i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}
903
904static int lio_get_sset_count(struct net_device *netdev, int sset)
905{
906 struct lio *lio = GET_LIO(netdev);
907 struct octeon_device *oct_dev = lio->oct_dev;
908
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700909 switch (sset) {
910 case ETH_SS_STATS:
911 return (ARRAY_SIZE(oct_stats_strings) +
912 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
913 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
914 default:
915 return -EOPNOTSUPP;
916 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700917}
918
/* ethtool get_coalesce handler: report interrupt-coalescing settings.
 *
 * Static OQ interrupt time/packet thresholds are reported from the
 * CN6XXX chip config only when adaptive moderation (intrmod) is off;
 * when intrmod is enabled the adaptive trigger parameters are reported
 * instead from oct->intrmod.  The tx threshold always comes from the
 * first instruction queue's fill threshold.
 * Returns 0 on success, -EINVAL for unrecognized chips.
 */
static int lio_get_intr_coalesce(struct net_device *netdev,
				 struct ethtool_coalesce *intr_coal)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_instr_queue *iq;
	struct oct_intrmod_cfg *intrmod_cfg;

	intrmod_cfg = &oct->intrmod;

	switch (oct->chip_id) {
	case OCTEON_CN68XX:
	case OCTEON_CN66XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;

		/* static thresholds only apply while intrmod is off */
		if (!intrmod_cfg->rx_enable) {
			intr_coal->rx_coalesce_usecs =
				CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
			intr_coal->rx_max_coalesced_frames =
				CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
		}

		iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
		intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
		break;
	}
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EINVAL;
	}
	/* adaptive-moderation parameters, valid for all supported chips */
	if (intrmod_cfg->rx_enable) {
		intr_coal->use_adaptive_rx_coalesce =
			intrmod_cfg->rx_enable;
		intr_coal->rate_sample_interval =
			intrmod_cfg->check_intrvl;
		intr_coal->pkt_rate_high =
			intrmod_cfg->maxpkt_ratethr;
		intr_coal->pkt_rate_low =
			intrmod_cfg->minpkt_ratethr;
		intr_coal->rx_max_coalesced_frames_high =
			intrmod_cfg->rx_maxcnt_trigger;
		intr_coal->rx_coalesce_usecs_high =
			intrmod_cfg->rx_maxtmr_trigger;
		intr_coal->rx_coalesce_usecs_low =
			intrmod_cfg->rx_mintmr_trigger;
		intr_coal->rx_max_coalesced_frames_low =
			intrmod_cfg->rx_mincnt_trigger;
	}
	return 0;
}
970
971/* Callback function for intrmod */
972static void octnet_intrmod_callback(struct octeon_device *oct_dev,
973 u32 status,
974 void *ptr)
975{
976 struct oct_intrmod_cmd *cmd = ptr;
977 struct octeon_soft_command *sc = cmd->sc;
978
979 oct_dev = cmd->oct_dev;
980
981 if (status)
982 dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
983 CVM_CAST64(status));
984 else
985 dev_info(&oct_dev->pci_dev->dev,
986 "Rx-Adaptive Interrupt moderation enabled:%llx\n",
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -0700987 oct_dev->intrmod.rx_enable);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700988
989 octeon_free_soft_command(oct_dev, sc);
990}
991
/* Configure interrupt moderation parameters.
 * Pushes @intr_cfg to the NIC firmware via an OPCODE_NIC_INTRMOD_CFG
 * soft command.  Completion is asynchronous: octnet_intrmod_callback()
 * logs the result and frees the soft command.
 * Returns 0 if the command was queued, -ENOMEM on allocation failure,
 * -EINVAL if the instruction queue rejected it.
 */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_cmd *cmd;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command: dptr carries the config payload, ctxptr the
	 * oct_intrmod_cmd context handed to the callback.
	 */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_cmd));

	if (!sc)
		return -ENOMEM;

	cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	/* Copy then swap each 8-byte word into the device's byte order. */
	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
	cmd->sc = sc;
	cmd->cfg = cfg;
	cmd->oct_dev = oct_dev;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = cmd;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		/* Never queued: the callback will not run, free here. */
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	return 0;
}
1038
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001039void
1040octnet_nic_stats_callback(struct octeon_device *oct_dev,
1041 u32 status, void *ptr)
1042{
1043 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1044 struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
1045 sc->virtrptr;
1046 struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
1047 sc->ctxptr;
1048 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1049 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1050
1051 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1052 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1053
1054 if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1055 octeon_swap_8B_data((u64 *)&resp->stats,
1056 (sizeof(struct oct_link_stats)) >> 3);
1057
1058 /* RX link-level stats */
1059 rstats->total_rcvd = rsp_rstats->total_rcvd;
1060 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1061 rstats->total_bcst = rsp_rstats->total_bcst;
1062 rstats->total_mcst = rsp_rstats->total_mcst;
1063 rstats->runts = rsp_rstats->runts;
1064 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1065 /* Accounts for over/under-run of buffers */
1066 rstats->fifo_err = rsp_rstats->fifo_err;
1067 rstats->dmac_drop = rsp_rstats->dmac_drop;
1068 rstats->fcs_err = rsp_rstats->fcs_err;
1069 rstats->jabber_err = rsp_rstats->jabber_err;
1070 rstats->l2_err = rsp_rstats->l2_err;
1071 rstats->frame_err = rsp_rstats->frame_err;
1072
1073 /* RX firmware stats */
1074 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1075 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1076 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1077 rstats->fw_err_link = rsp_rstats->fw_err_link;
1078 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1079 /* Number of packets that are LROed */
1080 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1081 /* Number of octets that are LROed */
1082 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1083 /* Number of LRO packets formed */
1084 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1085 /* Number of times lRO of packet aborted */
1086 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1087 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1088 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1089 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1090 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1091 /* intrmod: packet forward rate */
1092 rstats->fwd_rate = rsp_rstats->fwd_rate;
1093
1094 /* TX link-level stats */
1095 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1096 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1097 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1098 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1099 tstats->ctl_sent = rsp_tstats->ctl_sent;
1100 /* Packets sent after one collision*/
1101 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1102 /* Packets sent after multiple collision*/
1103 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1104 /* Packets not sent due to max collisions */
1105 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1106 /* Packets not sent due to max deferrals */
1107 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1108 /* Accounts for over/under-run of buffers */
1109 tstats->fifo_err = rsp_tstats->fifo_err;
1110 tstats->runts = rsp_tstats->runts;
1111 /* Total number of collisions detected */
1112 tstats->total_collisions = rsp_tstats->total_collisions;
1113
1114 /* firmware stats */
1115 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1116 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1117 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1118 tstats->fw_err_link = rsp_tstats->fw_err_link;
1119 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1120 tstats->fw_tso = rsp_tstats->fw_tso;
1121 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1122 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1123 resp->status = 1;
1124 } else {
1125 resp->status = -1;
1126 }
1127 complete(&ctrl->complete);
1128}
1129
/* Fetch link-level and firmware statistics from the NIC.
 * (The old comment here said "Configure interrupt moderation
 * parameters" — that was a copy/paste error.)
 * Sends an OPCODE_NIC_PORT_STATS soft command and waits up to 1 s for
 * octnet_nic_stats_callback() to populate oct_dev->link_stats.
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on send
 * failure or if the callback did not report success.
 */
static int octnet_get_link_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	struct octeon_soft_command *sc;
	struct oct_nic_stats_ctrl *ctrl;
	struct oct_nic_stats_resp *resp;

	int retval;

	/* Alloc soft command: no dptr payload, rptr holds the stats reply,
	 * ctxptr the completion context.
	 */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  sizeof(struct octnic_ctrl_pkt));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
	ctrl->netdev = netdev;
	init_completion(&ctrl->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	sc->callback = octnet_nic_stats_callback;
	sc->callback_arg = sc;
	sc->wait_time = 500;	/*in milli seconds*/

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* NOTE(review): if this wait times out before the 500 ms command
	 * timeout fires the callback, freeing sc below could race with a
	 * late callback — relies on response-manager timing; verify.
	 */
	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));

	if (resp->status != 1) {
		octeon_free_soft_command(oct_dev, sc);

		return -EINVAL;
	}

	octeon_free_soft_command(oct_dev, sc);

	return 0;
}
1187
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001188/* Enable/Disable auto interrupt Moderation */
1189static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001190 *intr_coal)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001191{
1192 int ret = 0;
1193 struct octeon_device *oct = lio->oct_dev;
1194 struct oct_intrmod_cfg *intrmod_cfg;
1195
1196 intrmod_cfg = &oct->intrmod;
1197
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001198 if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001199 if (intr_coal->rate_sample_interval)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001200 intrmod_cfg->check_intrvl =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001201 intr_coal->rate_sample_interval;
1202 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001203 intrmod_cfg->check_intrvl =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001204 LIO_INTRMOD_CHECK_INTERVAL;
1205
1206 if (intr_coal->pkt_rate_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001207 intrmod_cfg->maxpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001208 intr_coal->pkt_rate_high;
1209 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001210 intrmod_cfg->maxpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001211 LIO_INTRMOD_MAXPKT_RATETHR;
1212
1213 if (intr_coal->pkt_rate_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001214 intrmod_cfg->minpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001215 intr_coal->pkt_rate_low;
1216 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001217 intrmod_cfg->minpkt_ratethr =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001218 LIO_INTRMOD_MINPKT_RATETHR;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001219 }
1220 if (oct->intrmod.rx_enable) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001221 if (intr_coal->rx_max_coalesced_frames_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001222 intrmod_cfg->rx_maxcnt_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001223 intr_coal->rx_max_coalesced_frames_high;
1224 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001225 intrmod_cfg->rx_maxcnt_trigger =
1226 LIO_INTRMOD_RXMAXCNT_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001227
1228 if (intr_coal->rx_coalesce_usecs_high)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001229 intrmod_cfg->rx_maxtmr_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001230 intr_coal->rx_coalesce_usecs_high;
1231 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001232 intrmod_cfg->rx_maxtmr_trigger =
1233 LIO_INTRMOD_RXMAXTMR_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001234
1235 if (intr_coal->rx_coalesce_usecs_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001236 intrmod_cfg->rx_mintmr_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001237 intr_coal->rx_coalesce_usecs_low;
1238 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001239 intrmod_cfg->rx_mintmr_trigger =
1240 LIO_INTRMOD_RXMINTMR_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001241
1242 if (intr_coal->rx_max_coalesced_frames_low)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001243 intrmod_cfg->rx_mincnt_trigger =
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001244 intr_coal->rx_max_coalesced_frames_low;
1245 else
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001246 intrmod_cfg->rx_mincnt_trigger =
1247 LIO_INTRMOD_RXMINCNT_TRIGGER;
1248 }
1249 if (oct->intrmod.tx_enable) {
1250 if (intr_coal->tx_max_coalesced_frames_high)
1251 intrmod_cfg->tx_maxcnt_trigger =
1252 intr_coal->tx_max_coalesced_frames_high;
1253 else
1254 intrmod_cfg->tx_maxcnt_trigger =
1255 LIO_INTRMOD_TXMAXCNT_TRIGGER;
1256 if (intr_coal->tx_max_coalesced_frames_low)
1257 intrmod_cfg->tx_mincnt_trigger =
1258 intr_coal->tx_max_coalesced_frames_low;
1259 else
1260 intrmod_cfg->tx_mincnt_trigger =
1261 LIO_INTRMOD_TXMINCNT_TRIGGER;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001262 }
1263
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001264 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001265
1266 return ret;
1267}
1268
1269static int
1270oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
1271{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001272 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001273 u32 rx_max_coalesced_frames;
1274
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001275 /* Config Cnt based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001276 switch (oct->chip_id) {
1277 case OCTEON_CN68XX:
1278 case OCTEON_CN66XX: {
1279 struct octeon_cn6xxx *cn6xxx =
1280 (struct octeon_cn6xxx *)oct->chip;
1281
1282 if (!intr_coal->rx_max_coalesced_frames)
1283 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1284 else
1285 rx_max_coalesced_frames =
1286 intr_coal->rx_max_coalesced_frames;
1287 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1288 rx_max_coalesced_frames);
1289 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1290 break;
1291 }
1292 default:
1293 return -EINVAL;
1294 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001295 return 0;
1296}
1297
1298static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
1299 *intr_coal)
1300{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001301 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001302 u32 time_threshold, rx_coalesce_usecs;
1303
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001304 /* Config Time based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001305 switch (oct->chip_id) {
1306 case OCTEON_CN68XX:
1307 case OCTEON_CN66XX: {
1308 struct octeon_cn6xxx *cn6xxx =
1309 (struct octeon_cn6xxx *)oct->chip;
1310 if (!intr_coal->rx_coalesce_usecs)
1311 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1312 else
1313 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001314
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001315 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1316 rx_coalesce_usecs);
1317 octeon_write_csr(oct,
1318 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1319 time_threshold);
1320
1321 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1322 break;
1323 }
1324 default:
1325 return -EINVAL;
1326 }
1327
1328 return 0;
1329}
1330
1331static int
1332oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
1333 __attribute__((unused)))
1334{
1335 struct octeon_device *oct = lio->oct_dev;
1336
1337 /* Config Cnt based interrupt values */
1338 switch (oct->chip_id) {
1339 case OCTEON_CN68XX:
1340 case OCTEON_CN66XX:
1341 break;
1342 default:
1343 return -EINVAL;
1344 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001345 return 0;
1346}
1347
1348static int lio_set_intr_coalesce(struct net_device *netdev,
1349 struct ethtool_coalesce *intr_coal)
1350{
1351 struct lio *lio = GET_LIO(netdev);
1352 int ret;
1353 struct octeon_device *oct = lio->oct_dev;
1354 u32 j, q_no;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001355 int db_max, db_min;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001356
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001357 switch (oct->chip_id) {
1358 case OCTEON_CN68XX:
1359 case OCTEON_CN66XX:
1360 db_min = CN6XXX_DB_MIN;
1361 db_max = CN6XXX_DB_MAX;
1362 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1363 (intr_coal->tx_max_coalesced_frames <= db_max)) {
1364 for (j = 0; j < lio->linfo.num_txpciq; j++) {
1365 q_no = lio->linfo.txpciq[j].s.q_no;
1366 oct->instr_queue[q_no]->fill_threshold =
1367 intr_coal->tx_max_coalesced_frames;
1368 }
1369 } else {
1370 dev_err(&oct->pci_dev->dev,
1371 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1372 intr_coal->tx_max_coalesced_frames, db_min,
1373 db_max);
1374 return -EINVAL;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001375 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001376 break;
1377 default:
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001378 return -EINVAL;
1379 }
1380
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001381 oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1382 oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001383
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001384 ret = oct_cfg_adaptive_intr(lio, intr_coal);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001385
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001386 if (!intr_coal->use_adaptive_rx_coalesce) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001387 ret = oct_cfg_rx_intrtime(lio, intr_coal);
1388 if (ret)
1389 goto ret_intrmod;
1390
1391 ret = oct_cfg_rx_intrcnt(lio, intr_coal);
1392 if (ret)
1393 goto ret_intrmod;
1394 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001395 if (!intr_coal->use_adaptive_tx_coalesce) {
1396 ret = oct_cfg_tx_intrcnt(lio, intr_coal);
1397 if (ret)
1398 goto ret_intrmod;
1399 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001400
1401 return 0;
1402ret_intrmod:
1403 return ret;
1404}
1405
1406static int lio_get_ts_info(struct net_device *netdev,
1407 struct ethtool_ts_info *info)
1408{
1409 struct lio *lio = GET_LIO(netdev);
1410
1411 info->so_timestamping =
1412 SOF_TIMESTAMPING_TX_HARDWARE |
1413 SOF_TIMESTAMPING_TX_SOFTWARE |
1414 SOF_TIMESTAMPING_RX_HARDWARE |
1415 SOF_TIMESTAMPING_RX_SOFTWARE |
1416 SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
1417
1418 if (lio->ptp_clock)
1419 info->phc_index = ptp_clock_index(lio->ptp_clock);
1420 else
1421 info->phc_index = -1;
1422
1423 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1424
1425 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1426 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1427 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1428 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1429
1430 return 0;
1431}
1432
/* ethtool set_settings handler: forward speed/duplex/autoneg requests
 * to the SE core application via an OCTNET_CMD_SET_SETTINGS control
 * packet.  Fixed-speed XAUI/RXAUI interfaces reject all changes.
 * Forced mode accepts only 10/100 Mb/s half/full duplex.
 * Returns 0 on success, -EINVAL on bad input, -1 on send failure.
 */
static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	/* get the link info */
	linfo = &lio->linfo;

	if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* NOTE(review): reads ecmd->speed directly rather than via
	 * ethtool_cmd_speed(); acceptable here because only SPEED_10 and
	 * SPEED_100 pass validation, which fit in the low 16 bits.
	 */
	if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
						  ecmd->speed != SPEED_10) ||
						 (ecmd->duplex != DUPLEX_HALF &&
						  ecmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Ethtool Support is not provided for XAUI and RXAUI Interfaces
	 * as they operate at fixed Speed and Duplex settings
	 */
	if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
	    linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
		dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
		return -EINVAL;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 1000;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
	 * to SE core application using ncmd.s.more & ncmd.s.param
	 */
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg ON */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
				     OCTNIC_NCMD_AUTONEG_ON;
		nctrl.ncmd.s.param1 = ecmd->advertising;
	} else {
		/* Autoneg OFF */
		nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;

		nctrl.ncmd.s.param2 = ecmd->duplex;

		nctrl.ncmd.s.param1 = ecmd->speed;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
		return -1;
	}

	return 0;
}
1496
1497static int lio_nway_reset(struct net_device *netdev)
1498{
1499 if (netif_running(netdev)) {
1500 struct ethtool_cmd ecmd;
1501
1502 memset(&ecmd, 0, sizeof(struct ethtool_cmd));
1503 ecmd.autoneg = 0;
1504 ecmd.speed = 0;
1505 ecmd.duplex = 0;
1506 lio_set_settings(netdev, &ecmd);
1507 }
1508 return 0;
1509}
1510
/* ethtool get_regs_len handler: size of the buffer lio_get_regs()
 * fills.  Fixed at OCT_ETHTOOL_REGDUMP_LEN regardless of chip.
 */
static int lio_get_regs_len(struct net_device *dev)
{
	return OCT_ETHTOOL_REGDUMP_LEN;
}
1516
1517static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
1518{
1519 u32 reg;
1520 int i, len = 0;
1521
1522 /* PCI Window Registers */
1523
1524 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
1525 reg = CN6XXX_WIN_WR_ADDR_LO;
1526 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
1527 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
1528 reg = CN6XXX_WIN_WR_ADDR_HI;
1529 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
1530 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
1531 reg = CN6XXX_WIN_RD_ADDR_LO;
1532 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
1533 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
1534 reg = CN6XXX_WIN_RD_ADDR_HI;
1535 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
1536 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
1537 reg = CN6XXX_WIN_WR_DATA_LO;
1538 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
1539 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
1540 reg = CN6XXX_WIN_WR_DATA_HI;
1541 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
1542 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
1543 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
1544 CN6XXX_WIN_WR_MASK_REG,
1545 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
1546
1547 /* PCI Interrupt Register */
1548 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
1549 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
1550 CN6XXX_SLI_INT_ENB64_PORT0));
1551 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
1552 CN6XXX_SLI_INT_ENB64_PORT1,
1553 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
1554 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
1555 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
1556
1557 /* PCI Output queue registers */
1558 for (i = 0; i < oct->num_oqs; i++) {
1559 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
1560 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
1561 reg, i, octeon_read_csr(oct, reg));
1562 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
1563 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
1564 reg, i, octeon_read_csr(oct, reg));
1565 }
1566 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
1567 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
1568 reg, octeon_read_csr(oct, reg));
1569 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
1570 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
1571 reg, octeon_read_csr(oct, reg));
1572
1573 /* PCI Input queue registers */
1574 for (i = 0; i <= 3; i++) {
1575 u32 reg;
1576
1577 reg = CN6XXX_SLI_IQ_DOORBELL(i);
1578 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
1579 reg, i, octeon_read_csr(oct, reg));
1580 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
1581 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
1582 reg, i, octeon_read_csr(oct, reg));
1583 }
1584
1585 /* PCI DMA registers */
1586
1587 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
1588 CN6XXX_DMA_CNT(0),
1589 octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
1590 reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
1591 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
1592 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
1593 reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
1594 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
1595 CN6XXX_DMA_TIME_INT_LEVEL(0),
1596 octeon_read_csr(oct, reg));
1597
1598 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
1599 CN6XXX_DMA_CNT(1),
1600 octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
1601 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1602 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
1603 CN6XXX_DMA_PKT_INT_LEVEL(1),
1604 octeon_read_csr(oct, reg));
1605 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
1606 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
1607 CN6XXX_DMA_TIME_INT_LEVEL(1),
1608 octeon_read_csr(oct, reg));
1609
1610 /* PCI Index registers */
1611
1612 len += sprintf(s + len, "\n");
1613
1614 for (i = 0; i < 16; i++) {
1615 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
1616 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
1617 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
1618 }
1619
1620 return len;
1621}
1622
1623static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
1624{
1625 u32 val;
1626 int i, len = 0;
1627
1628 /* PCI CONFIG Registers */
1629
1630 len += sprintf(s + len,
1631 "\n\t Octeon Config space Registers\n\n");
1632
1633 for (i = 0; i <= 13; i++) {
1634 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1635 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1636 (i * 4), i, val);
1637 }
1638
1639 for (i = 30; i <= 34; i++) {
1640 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
1641 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
1642 (i * 4), i, val);
1643 }
1644
1645 return len;
1646}
1647
1648/* Return register dump user app. */
1649static void lio_get_regs(struct net_device *dev,
1650 struct ethtool_regs *regs, void *regbuf)
1651{
1652 struct lio *lio = GET_LIO(dev);
1653 int len = 0;
1654 struct octeon_device *oct = lio->oct_dev;
1655
1656 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
1657 regs->version = OCT_ETHTOOL_REGSVER;
1658
1659 switch (oct->chip_id) {
1660 /* case OCTEON_CN73XX: Todo */
1661 case OCTEON_CN68XX:
1662 case OCTEON_CN66XX:
1663 len += cn6xxx_read_csr_reg(regbuf + len, oct);
1664 len += cn6xxx_read_config_reg(regbuf + len, oct);
1665 break;
1666 default:
1667 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
1668 __func__, oct->chip_id);
1669 }
1670}
1671
/* ethtool entry points for LiquidIO network interfaces; installed on
 * each netdev by liquidio_set_ethtool_ops().
 */
static const struct ethtool_ops lio_ethtool_ops = {
	.get_settings		= lio_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= lio_get_drvinfo,
	.get_ringparam		= lio_ethtool_get_ringparam,
	.get_channels		= lio_ethtool_get_channels,
	.set_phys_id		= lio_set_phys_id,
	.get_eeprom_len		= lio_get_eeprom_len,
	.get_eeprom		= lio_get_eeprom,
	.get_strings		= lio_get_strings,
	.get_ethtool_stats	= lio_get_ethtool_stats,
	.get_pauseparam		= lio_get_pauseparam,
	.get_regs_len		= lio_get_regs_len,
	.get_regs		= lio_get_regs,
	.get_msglevel		= lio_get_msglevel,
	.set_msglevel		= lio_set_msglevel,
	.get_sset_count		= lio_get_sset_count,
	.nway_reset		= lio_nway_reset,
	.set_settings		= lio_set_settings,
	.get_coalesce		= lio_get_intr_coalesce,
	.set_coalesce		= lio_set_intr_coalesce,
	.get_ts_info		= lio_get_ts_info,
};
1695
/* Attach the LiquidIO ethtool operations to @netdev; called during
 * interface setup.
 */
void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &lio_ethtool_ops;
}