blob: 2e253061460b76e8e3136586926b97f532566e52 [file] [log] [blame]
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001/**********************************************************************
Raghu Vatsavayi50579d32016-11-14 15:54:46 -08002 * Author: Cavium, Inc.
3 *
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
6 *
7 * Copyright (c) 2003-2016 Cavium, Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070018#include <linux/netdevice.h>
19#include <linux/net_tstamp.h>
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070020#include <linux/pci.h>
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070021#include "liquidio_common.h"
22#include "octeon_droq.h"
23#include "octeon_iq.h"
24#include "response_manager.h"
25#include "octeon_device.h"
26#include "octeon_nic.h"
27#include "octeon_main.h"
28#include "octeon_network.h"
29#include "cn66xx_regs.h"
30#include "cn66xx_device.h"
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -070031#include "cn23xx_pf_device.h"
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -080032#include "cn23xx_vf_device.h"
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070033
Raghu Vatsavayi1f164712016-06-21 22:53:11 -070034static int octnet_get_link_stats(struct net_device *netdev);
35
Prasad Kanneganti50c0add2017-03-28 12:14:06 -070036struct oct_intrmod_context {
37 int octeon_id;
38 wait_queue_head_t wc;
39 int cond;
40 int status;
41};
42
43struct oct_intrmod_resp {
44 u64 rh;
45 struct oct_intrmod_cfg intrmod;
46 u64 status;
47};
48
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070049struct oct_mdio_cmd_context {
50 int octeon_id;
51 wait_queue_head_t wc;
52 int cond;
53};
54
55struct oct_mdio_cmd_resp {
56 u64 rh;
57 struct oct_mdio_cmd resp;
58 u64 status;
59};
60
61#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
62
63/* Octeon's interface mode of operation */
64enum {
65 INTERFACE_MODE_DISABLED,
66 INTERFACE_MODE_RGMII,
67 INTERFACE_MODE_GMII,
68 INTERFACE_MODE_SPI,
69 INTERFACE_MODE_PCIE,
70 INTERFACE_MODE_XAUI,
71 INTERFACE_MODE_SGMII,
72 INTERFACE_MODE_PICMG,
73 INTERFACE_MODE_NPI,
74 INTERFACE_MODE_LOOP,
75 INTERFACE_MODE_SRIO,
76 INTERFACE_MODE_ILK,
77 INTERFACE_MODE_RXAUI,
78 INTERFACE_MODE_QSGMII,
79 INTERFACE_MODE_AGL,
Raghu Vatsavayi9eb60842016-06-21 22:53:12 -070080 INTERFACE_MODE_XLAUI,
81 INTERFACE_MODE_XFI,
82 INTERFACE_MODE_10G_KR,
83 INTERFACE_MODE_40G_KR4,
84 INTERFACE_MODE_MIXED,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070085};
86
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070087#define OCT_ETHTOOL_REGDUMP_LEN 4096
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -070088#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -080089#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -070090#define OCT_ETHTOOL_REGSVER 1
91
Raghu Vatsavayi1f164712016-06-21 22:53:11 -070092/* statistics of PF */
93static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
94 "rx_packets",
95 "tx_packets",
96 "rx_bytes",
97 "tx_bytes",
98 "rx_errors", /*jabber_err+l2_err+frame_err */
99 "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
Raghu Vatsavayi50579d32016-11-14 15:54:46 -0800100 "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
101 *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
102 */
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700103 "tx_dropped",
104
105 "tx_total_sent",
106 "tx_total_fwd",
107 "tx_err_pko",
108 "tx_err_link",
109 "tx_err_drop",
110
111 "tx_tso",
112 "tx_tso_packets",
113 "tx_tso_err",
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -0700114 "tx_vxlan",
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700115
116 "mac_tx_total_pkts",
117 "mac_tx_total_bytes",
118 "mac_tx_mcast_pkts",
119 "mac_tx_bcast_pkts",
120 "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */
121 "mac_tx_total_collisions",
122 "mac_tx_one_collision",
123 "mac_tx_multi_collison",
124 "mac_tx_max_collision_fail",
125 "mac_tx_max_deferal_fail",
126 "mac_tx_fifo_err",
127 "mac_tx_runts",
128
129 "rx_total_rcvd",
130 "rx_total_fwd",
131 "rx_jabber_err",
132 "rx_l2_err",
133 "rx_frame_err",
134 "rx_err_pko",
135 "rx_err_link",
136 "rx_err_drop",
137
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -0700138 "rx_vxlan",
139 "rx_vxlan_err",
140
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700141 "rx_lro_pkts",
142 "rx_lro_bytes",
143 "rx_total_lro",
144
145 "rx_lro_aborts",
146 "rx_lro_aborts_port",
147 "rx_lro_aborts_seq",
148 "rx_lro_aborts_tsval",
149 "rx_lro_aborts_timer",
150 "rx_fwd_rate",
151
152 "mac_rx_total_rcvd",
153 "mac_rx_bytes",
154 "mac_rx_total_bcst",
155 "mac_rx_total_mcst",
156 "mac_rx_runts",
157 "mac_rx_ctl_packets",
158 "mac_rx_fifo_err",
159 "mac_rx_dma_drop",
160 "mac_rx_fcs_err",
161
162 "link_state_changes",
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700163};
164
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -0800165/* statistics of VF */
166static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
167 "rx_packets",
168 "tx_packets",
169 "rx_bytes",
170 "tx_bytes",
171 "rx_errors", /* jabber_err + l2_err+frame_err */
172 "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */
173 "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
174 "tx_dropped",
175 "link_state_changes",
176};
177
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700178/* statistics of host tx queue */
179static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
180 "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
181 "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
182 "dropped",
183 "iq_busy",
184 "sgentry_sent",
185
186 "fw_instr_posted",
187 "fw_instr_processed",
188 "fw_instr_dropped",
189 "fw_bytes_sent",
190
191 "tso",
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -0700192 "vxlan",
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700193 "txq_restart",
194};
195
196/* statistics of host rx queue */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700197static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700198 "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */
199 "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */
200 "dropped", /*oct->droq[oq_no]->stats.rx_dropped+
201 *oct->droq[oq_no]->stats.dropped_nodispatch+
202 *oct->droq[oq_no]->stats.dropped_toomany+
203 *oct->droq[oq_no]->stats.dropped_nomem
204 */
205 "dropped_nomem",
206 "dropped_toomany",
207 "fw_dropped",
208 "fw_pkts_received",
209 "fw_bytes_received",
210 "fw_dropped_nodispatch",
211
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -0700212 "vxlan",
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700213 "buffer_alloc_failure",
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700214};
215
Raghu Vatsavayi30136392016-09-01 11:16:11 -0700216/* LiquidIO driver private flags */
217static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
218};
219
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700220#define OCTNIC_NCMD_AUTONEG_ON 0x1
221#define OCTNIC_NCMD_PHY_ON 0x2
222
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -0800223static int lio_get_link_ksettings(struct net_device *netdev,
224 struct ethtool_link_ksettings *ecmd)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700225{
226 struct lio *lio = GET_LIO(netdev);
227 struct octeon_device *oct = lio->oct_dev;
228 struct oct_link_info *linfo;
Manish Awasthife723df2017-03-16 16:16:17 -0700229 u32 supported = 0, advertising = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700230
231 linfo = &lio->linfo;
232
Raghu Vatsavayi9eb60842016-06-21 22:53:12 -0700233 if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
234 linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
Manish Awasthife723df2017-03-16 16:16:17 -0700235 linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
Raghu Vatsavayi9eb60842016-06-21 22:53:12 -0700236 linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -0800237 ecmd->base.port = PORT_FIBRE;
Manish Awasthife723df2017-03-16 16:16:17 -0700238
239 if (linfo->link.s.speed == SPEED_10000) {
240 supported = SUPPORTED_10000baseT_Full;
241 advertising = ADVERTISED_10000baseT_Full;
242 }
243
244 supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
245 advertising |= ADVERTISED_Pause;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -0800246 ethtool_convert_legacy_u32_to_link_mode(
247 ecmd->link_modes.supported, supported);
248 ethtool_convert_legacy_u32_to_link_mode(
249 ecmd->link_modes.advertising, advertising);
250 ecmd->base.autoneg = AUTONEG_DISABLE;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700251
252 } else {
Raghu Vatsavayi9eb60842016-06-21 22:53:12 -0700253 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
254 linfo->link.s.if_mode);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700255 }
256
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700257 if (linfo->link.s.link_up) {
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -0800258 ecmd->base.speed = linfo->link.s.speed;
259 ecmd->base.duplex = linfo->link.s.duplex;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700260 } else {
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -0800261 ecmd->base.speed = SPEED_UNKNOWN;
262 ecmd->base.duplex = DUPLEX_UNKNOWN;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700263 }
264
265 return 0;
266}
267
268static void
269lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
270{
271 struct lio *lio;
272 struct octeon_device *oct;
273
274 lio = GET_LIO(netdev);
275 oct = lio->oct_dev;
276
277 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
278 strcpy(drvinfo->driver, "liquidio");
279 strcpy(drvinfo->version, LIQUIDIO_VERSION);
280 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
281 ETHTOOL_FWVERS_LEN);
282 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700283}
284
285static void
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -0800286lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
287{
288 struct octeon_device *oct;
289 struct lio *lio;
290
291 lio = GET_LIO(netdev);
292 oct = lio->oct_dev;
293
294 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
295 strcpy(drvinfo->driver, "liquidio_vf");
296 strcpy(drvinfo->version, LIQUIDIO_VERSION);
297 strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
298 ETHTOOL_FWVERS_LEN);
299 strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
300}
301
302static void
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700303lio_ethtool_get_channels(struct net_device *dev,
304 struct ethtool_channels *channel)
305{
306 struct lio *lio = GET_LIO(dev);
307 struct octeon_device *oct = lio->oct_dev;
308 u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
309
310 if (OCTEON_CN6XXX(oct)) {
Raghu Vatsavayi97a25322016-11-14 15:54:47 -0800311 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700312
313 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
314 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
315 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
316 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700317 } else if (OCTEON_CN23XX_PF(oct)) {
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700318
Weilin Chang026b4712017-01-04 16:18:50 -0800319 max_rx = oct->sriov_info.num_pf_rings;
320 max_tx = oct->sriov_info.num_pf_rings;
321 rx_count = lio->linfo.num_rxpciq;
322 tx_count = lio->linfo.num_txpciq;
323 } else if (OCTEON_CN23XX_VF(oct)) {
324 max_tx = oct->sriov_info.rings_per_vf;
325 max_rx = oct->sriov_info.rings_per_vf;
326 rx_count = lio->linfo.num_rxpciq;
327 tx_count = lio->linfo.num_txpciq;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700328 }
329
330 channel->max_rx = max_rx;
331 channel->max_tx = max_tx;
332 channel->rx_count = rx_count;
333 channel->tx_count = tx_count;
334}
335
336static int lio_get_eeprom_len(struct net_device *netdev)
337{
338 u8 buf[128];
339 struct lio *lio = GET_LIO(netdev);
340 struct octeon_device *oct_dev = lio->oct_dev;
341 struct octeon_board_info *board_info;
342 int len;
343
344 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
345 len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
346 board_info->name, board_info->serial_number,
347 board_info->major, board_info->minor);
348
349 return len;
350}
351
352static int
353lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
354 u8 *bytes)
355{
356 struct lio *lio = GET_LIO(netdev);
357 struct octeon_device *oct_dev = lio->oct_dev;
358 struct octeon_board_info *board_info;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700359
Raghu Vatsavayi32581242016-08-31 11:03:20 -0700360 if (eeprom->offset)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700361 return -EINVAL;
362
363 eeprom->magic = oct_dev->pci_dev->vendor;
364 board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
Raghu Vatsavayi32581242016-08-31 11:03:20 -0700365 sprintf((char *)bytes,
366 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
367 board_info->name, board_info->serial_number,
368 board_info->major, board_info->minor);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700369
370 return 0;
371}
372
373static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
374{
375 struct lio *lio = GET_LIO(netdev);
376 struct octeon_device *oct = lio->oct_dev;
377 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700378 int ret = 0;
379
380 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
381
382 nctrl.ncmd.u64 = 0;
383 nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700384 nctrl.ncmd.s.param1 = addr;
385 nctrl.ncmd.s.param2 = val;
386 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700387 nctrl.wait_time = 100;
388 nctrl.netpndev = (u64)netdev;
389 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
390
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700391 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700392 if (ret < 0) {
393 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
394 return -EINVAL;
395 }
396
397 return 0;
398}
399
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700400static int octnet_id_active(struct net_device *netdev, int val)
401{
402 struct lio *lio = GET_LIO(netdev);
403 struct octeon_device *oct = lio->oct_dev;
404 struct octnic_ctrl_pkt nctrl;
405 int ret = 0;
406
407 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
408
409 nctrl.ncmd.u64 = 0;
410 nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
411 nctrl.ncmd.s.param1 = val;
412 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
413 nctrl.wait_time = 100;
414 nctrl.netpndev = (u64)netdev;
415 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
416
417 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
418 if (ret < 0) {
419 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
420 return -EINVAL;
421 }
422
423 return 0;
424}
425
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700426/* Callback for when mdio command response arrives
427 */
428static void octnet_mdio_resp_callback(struct octeon_device *oct,
429 u32 status,
430 void *buf)
431{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700432 struct oct_mdio_cmd_context *mdio_cmd_ctx;
433 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
434
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700435 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
436
437 oct = lio_get_device(mdio_cmd_ctx->octeon_id);
438 if (status) {
439 dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
440 CVM_CAST64(status));
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -0700441 WRITE_ONCE(mdio_cmd_ctx->cond, -1);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700442 } else {
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -0700443 WRITE_ONCE(mdio_cmd_ctx->cond, 1);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700444 }
445 wake_up_interruptible(&mdio_cmd_ctx->wc);
446}
447
448/* This routine provides PHY access routines for
449 * mdio clause45 .
450 */
451static int
452octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
453{
454 struct octeon_device *oct_dev = lio->oct_dev;
455 struct octeon_soft_command *sc;
456 struct oct_mdio_cmd_resp *mdio_cmd_rsp;
457 struct oct_mdio_cmd_context *mdio_cmd_ctx;
458 struct oct_mdio_cmd *mdio_cmd;
459 int retval = 0;
460
461 sc = (struct octeon_soft_command *)
462 octeon_alloc_soft_command(oct_dev,
463 sizeof(struct oct_mdio_cmd),
464 sizeof(struct oct_mdio_cmd_resp),
465 sizeof(struct oct_mdio_cmd_context));
466
467 if (!sc)
468 return -ENOMEM;
469
470 mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
471 mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
472 mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
473
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -0700474 WRITE_ONCE(mdio_cmd_ctx->cond, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700475 mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
476 mdio_cmd->op = op;
477 mdio_cmd->mdio_addr = loc;
478 if (op)
479 mdio_cmd->value1 = *value;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700480 octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
481
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700482 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
483
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700484 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
485 0, 0, 0);
486
487 sc->wait_time = 1000;
488 sc->callback = octnet_mdio_resp_callback;
489 sc->callback_arg = sc;
490
491 init_waitqueue_head(&mdio_cmd_ctx->wc);
492
493 retval = octeon_send_soft_command(oct_dev, sc);
494
Raghu Vatsavayiddc173a2016-06-14 16:54:43 -0700495 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700496 dev_err(&oct_dev->pci_dev->dev,
497 "octnet_mdio45_access instruction failed status: %x\n",
498 retval);
Raghu Vatsavayi32581242016-08-31 11:03:20 -0700499 retval = -EBUSY;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700500 } else {
501 /* Sleep on a wait queue till the cond flag indicates that the
502 * response arrived
503 */
504 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
505 retval = mdio_cmd_rsp->status;
506 if (retval) {
507 dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
508 retval = -EBUSY;
509 } else {
510 octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
511 sizeof(struct oct_mdio_cmd) / 8);
512
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -0700513 if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700514 if (!op)
515 *value = mdio_cmd_rsp->resp.value1;
516 } else {
517 retval = -EINVAL;
518 }
519 }
520 }
521
522 octeon_free_soft_command(oct_dev, sc);
523
524 return retval;
525}
526
527static int lio_set_phys_id(struct net_device *netdev,
528 enum ethtool_phys_id_state state)
529{
530 struct lio *lio = GET_LIO(netdev);
531 struct octeon_device *oct = lio->oct_dev;
532 int value, ret;
533
534 switch (state) {
535 case ETHTOOL_ID_ACTIVE:
536 if (oct->chip_id == OCTEON_CN66XX) {
537 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
538 VITESSE_PHY_GPIO_DRIVEON);
539 return 2;
540
541 } else if (oct->chip_id == OCTEON_CN68XX) {
542 /* Save the current LED settings */
543 ret = octnet_mdio45_access(lio, 0,
544 LIO68XX_LED_BEACON_ADDR,
545 &lio->phy_beacon_val);
546 if (ret)
547 return ret;
548
549 ret = octnet_mdio45_access(lio, 0,
550 LIO68XX_LED_CTRL_ADDR,
551 &lio->led_ctrl_val);
552 if (ret)
553 return ret;
554
555 /* Configure Beacon values */
556 value = LIO68XX_LED_BEACON_CFGON;
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -0700557 ret = octnet_mdio45_access(lio, 1,
558 LIO68XX_LED_BEACON_ADDR,
559 &value);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700560 if (ret)
561 return ret;
562
563 value = LIO68XX_LED_CTRL_CFGON;
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -0700564 ret = octnet_mdio45_access(lio, 1,
565 LIO68XX_LED_CTRL_ADDR,
566 &value);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700567 if (ret)
568 return ret;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700569 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
570 octnet_id_active(netdev, LED_IDENTIFICATION_ON);
571
572 /* returns 0 since updates are asynchronous */
573 return 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700574 } else {
575 return -EINVAL;
576 }
577 break;
578
579 case ETHTOOL_ID_ON:
580 if (oct->chip_id == OCTEON_CN66XX) {
581 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
582 VITESSE_PHY_GPIO_HIGH);
583
584 } else if (oct->chip_id == OCTEON_CN68XX) {
585 return -EINVAL;
586 } else {
587 return -EINVAL;
588 }
589 break;
590
591 case ETHTOOL_ID_OFF:
592 if (oct->chip_id == OCTEON_CN66XX)
593 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
594 VITESSE_PHY_GPIO_LOW);
595 else if (oct->chip_id == OCTEON_CN68XX)
596 return -EINVAL;
597 else
598 return -EINVAL;
599
600 break;
601
602 case ETHTOOL_ID_INACTIVE:
603 if (oct->chip_id == OCTEON_CN66XX) {
604 octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
605 VITESSE_PHY_GPIO_DRIVEOFF);
606 } else if (oct->chip_id == OCTEON_CN68XX) {
607 /* Restore LED settings */
608 ret = octnet_mdio45_access(lio, 1,
609 LIO68XX_LED_CTRL_ADDR,
610 &lio->led_ctrl_val);
611 if (ret)
612 return ret;
613
Dan Carpentercbdb9772015-06-24 17:47:02 +0300614 ret = octnet_mdio45_access(lio, 1,
615 LIO68XX_LED_BEACON_ADDR,
616 &lio->phy_beacon_val);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700617 if (ret)
618 return ret;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700619 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
620 octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700621
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700622 return 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700623 } else {
624 return -EINVAL;
625 }
626 break;
627
628 default:
629 return -EINVAL;
630 }
631
632 return 0;
633}
634
635static void
636lio_ethtool_get_ringparam(struct net_device *netdev,
637 struct ethtool_ringparam *ering)
638{
639 struct lio *lio = GET_LIO(netdev);
640 struct octeon_device *oct = lio->oct_dev;
641 u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
642 rx_pending = 0;
643
644 if (OCTEON_CN6XXX(oct)) {
Raghu Vatsavayi97a25322016-11-14 15:54:47 -0800645 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700646
647 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
648 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
649 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
650 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700651 } else if (OCTEON_CN23XX_PF(oct)) {
Raghu Vatsavayi97a25322016-11-14 15:54:47 -0800652 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -0700653
654 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
655 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
656 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx);
657 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700658 }
659
Raghu Vatsavayi4c2743f2016-07-03 13:56:53 -0700660 if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700661 ering->rx_pending = 0;
662 ering->rx_max_pending = 0;
663 ering->rx_mini_pending = 0;
664 ering->rx_jumbo_pending = rx_pending;
665 ering->rx_mini_max_pending = 0;
666 ering->rx_jumbo_max_pending = rx_max_pending;
667 } else {
668 ering->rx_pending = rx_pending;
669 ering->rx_max_pending = rx_max_pending;
670 ering->rx_mini_pending = 0;
671 ering->rx_jumbo_pending = 0;
672 ering->rx_mini_max_pending = 0;
673 ering->rx_jumbo_max_pending = 0;
674 }
675
676 ering->tx_pending = tx_pending;
677 ering->tx_max_pending = tx_max_pending;
678}
679
680static u32 lio_get_msglevel(struct net_device *netdev)
681{
682 struct lio *lio = GET_LIO(netdev);
683
684 return lio->msg_enable;
685}
686
687static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
688{
689 struct lio *lio = GET_LIO(netdev);
690
691 if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
692 if (msglvl & NETIF_MSG_HW)
693 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700694 OCTNET_CMD_VERBOSE_ENABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700695 else
696 liquidio_set_feature(netdev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -0700697 OCTNET_CMD_VERBOSE_DISABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700698 }
699
700 lio->msg_enable = msglvl;
701}
702
703static void
704lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
705{
706 /* Notes: Not supporting any auto negotiation in these
707 * drivers. Just report pause frame support.
708 */
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700709 struct lio *lio = GET_LIO(netdev);
710 struct octeon_device *oct = lio->oct_dev;
711
712 pause->autoneg = 0;
713
714 pause->tx_pause = oct->tx_pause;
715 pause->rx_pause = oct->rx_pause;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700716}
717
Raghu Vatsavayi30136392016-09-01 11:16:11 -0700718static int
719lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
720{
721 /* Notes: Not supporting any auto negotiation in these
722 * drivers.
723 */
724 struct lio *lio = GET_LIO(netdev);
725 struct octeon_device *oct = lio->oct_dev;
726 struct octnic_ctrl_pkt nctrl;
727 struct oct_link_info *linfo = &lio->linfo;
728
729 int ret = 0;
730
731 if (oct->chip_id != OCTEON_CN23XX_PF_VID)
732 return -EINVAL;
733
734 if (linfo->link.s.duplex == 0) {
735 /*no flow control for half duplex*/
736 if (pause->rx_pause || pause->tx_pause)
737 return -EINVAL;
738 }
739
740 /*do not support autoneg of link flow control*/
741 if (pause->autoneg == AUTONEG_ENABLE)
742 return -EINVAL;
743
744 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
745
746 nctrl.ncmd.u64 = 0;
747 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
748 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
749 nctrl.wait_time = 100;
750 nctrl.netpndev = (u64)netdev;
751 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
752
753 if (pause->rx_pause) {
754 /*enable rx pause*/
755 nctrl.ncmd.s.param1 = 1;
756 } else {
757 /*disable rx pause*/
758 nctrl.ncmd.s.param1 = 0;
759 }
760
761 if (pause->tx_pause) {
762 /*enable tx pause*/
763 nctrl.ncmd.s.param2 = 1;
764 } else {
765 /*disable tx pause*/
766 nctrl.ncmd.s.param2 = 0;
767 }
768
769 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
770 if (ret < 0) {
771 dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
772 return -EINVAL;
773 }
774
775 oct->rx_pause = pause->rx_pause;
776 oct->tx_pause = pause->tx_pause;
777
778 return 0;
779}
780
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700781static void
782lio_get_ethtool_stats(struct net_device *netdev,
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -0700783 struct ethtool_stats *stats __attribute__((unused)),
784 u64 *data)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700785{
786 struct lio *lio = GET_LIO(netdev);
787 struct octeon_device *oct_dev = lio->oct_dev;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700788 struct net_device_stats *netstats = &netdev->stats;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700789 int i = 0, j;
790
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700791 netdev->netdev_ops->ndo_get_stats(netdev);
792 octnet_get_link_stats(netdev);
793
794 /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
795 data[i++] = CVM_CAST64(netstats->rx_packets);
796 /*sum of oct->instr_queue[iq_no]->stats.tx_done */
797 data[i++] = CVM_CAST64(netstats->tx_packets);
798 /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
799 data[i++] = CVM_CAST64(netstats->rx_bytes);
800 /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
801 data[i++] = CVM_CAST64(netstats->tx_bytes);
802 data[i++] = CVM_CAST64(netstats->rx_errors);
803 data[i++] = CVM_CAST64(netstats->tx_errors);
804 /*sum of oct->droq[oq_no]->stats->rx_dropped +
805 *oct->droq[oq_no]->stats->dropped_nodispatch +
806 *oct->droq[oq_no]->stats->dropped_toomany +
807 *oct->droq[oq_no]->stats->dropped_nomem
808 */
809 data[i++] = CVM_CAST64(netstats->rx_dropped);
810 /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
811 data[i++] = CVM_CAST64(netstats->tx_dropped);
812
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700813 /* firmware tx stats */
814 /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
815 *fromhost.fw_total_sent
816 */
817 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
818 /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
819 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
820 /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
821 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
822 /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
823 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
824 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
825 *fw_err_drop
826 */
827 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
828
829 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
830 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
831 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
832 *fw_tso_fwd
833 */
834 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
835 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
836 *fw_err_tso
837 */
838 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -0700839 /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
840 *fw_tx_vxlan
841 */
842 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700843
844 /* mac tx statistics */
845 /*CVMX_BGXX_CMRX_TX_STAT5 */
846 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
847 /*CVMX_BGXX_CMRX_TX_STAT4 */
848 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
849 /*CVMX_BGXX_CMRX_TX_STAT15 */
850 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
851 /*CVMX_BGXX_CMRX_TX_STAT14 */
852 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
853 /*CVMX_BGXX_CMRX_TX_STAT17 */
854 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
855 /*CVMX_BGXX_CMRX_TX_STAT0 */
856 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
857 /*CVMX_BGXX_CMRX_TX_STAT3 */
858 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
859 /*CVMX_BGXX_CMRX_TX_STAT2 */
860 data[i++] =
861 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
862 /*CVMX_BGXX_CMRX_TX_STAT0 */
863 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
864 /*CVMX_BGXX_CMRX_TX_STAT1 */
865 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
866 /*CVMX_BGXX_CMRX_TX_STAT16 */
867 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
868 /*CVMX_BGXX_CMRX_TX_STAT6 */
869 data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
870
871 /* RX firmware stats */
872 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
873 *fw_total_rcvd
874 */
875 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
876 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
877 *fw_total_fwd
878 */
879 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
880 /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
881 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
882 /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
883 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
884 /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
885 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
886 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
887 *fw_err_pko
888 */
889 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
890 /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
891 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
892 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
893 *fromwire.fw_err_drop
894 */
895 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
896
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -0700897 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
898 *fromwire.fw_rx_vxlan
899 */
900 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
901 /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
902 *fromwire.fw_rx_vxlan_err
903 */
904 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
905
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700906 /* LRO */
907 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
908 *fw_lro_pkts
909 */
910 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
911 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
912 *fw_lro_octs
913 */
914 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
915 /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
916 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
917 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
918 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
919 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
920 *fw_lro_aborts_port
921 */
922 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
923 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
924 *fw_lro_aborts_seq
925 */
926 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
927 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
928 *fw_lro_aborts_tsval
929 */
930 data[i++] =
931 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
932 /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
933 *fw_lro_aborts_timer
934 */
935 /* intrmod: packet forward rate */
936 data[i++] =
937 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
938 /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
939 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
940
941 /* mac: link-level stats */
942 /*CVMX_BGXX_CMRX_RX_STAT0 */
943 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
944 /*CVMX_BGXX_CMRX_RX_STAT1 */
945 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
946 /*CVMX_PKI_STATX_STAT5 */
947 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
948 /*CVMX_PKI_STATX_STAT5 */
949 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
950 /*wqe->word2.err_code or wqe->word2.err_level */
951 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
952 /*CVMX_BGXX_CMRX_RX_STAT2 */
953 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
954 /*CVMX_BGXX_CMRX_RX_STAT6 */
955 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
956 /*CVMX_BGXX_CMRX_RX_STAT4 */
957 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
958 /*wqe->word2.err_code or wqe->word2.err_level */
959 data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
960 /*lio->link_changes*/
961 data[i++] = CVM_CAST64(lio->link_changes);
962
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700963 for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
Raghu Vatsavayi763185a2016-11-14 15:54:45 -0800964 if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700965 continue;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700966 /*packets to network port*/
967 /*# of packets tx to network */
968 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
969 /*# of bytes tx to network */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -0700970 data[i++] =
971 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700972 /*# of packets dropped */
973 data[i++] =
974 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
975 /*# of tx fails due to queue full */
976 data[i++] =
977 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
978 /*XXX gather entries sent */
979 data[i++] =
980 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
981
982 /*instruction to firmware: data and control */
983 /*# of instructions to the queue */
984 data[i++] =
985 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
986 /*# of instructions processed */
Satanand Burla9ae122c2017-05-31 10:45:15 -0700987 data[i++] = CVM_CAST64(
988 oct_dev->instr_queue[j]->stats.instr_processed);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700989 /*# of instructions could not be processed */
Satanand Burla9ae122c2017-05-31 10:45:15 -0700990 data[i++] = CVM_CAST64(
991 oct_dev->instr_queue[j]->stats.instr_dropped);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -0700992 /*bytes sent through the queue */
993 data[i++] =
994 CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
995
996 /*tso request*/
997 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -0700998 /*vxlan request*/
999 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001000 /*txq restart*/
1001 data[i++] =
1002 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001003 }
1004
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001005 /* RX */
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001006 for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
Raghu Vatsavayi763185a2016-11-14 15:54:45 -08001007 if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001008 continue;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001009
1010 /*packets send to TCP/IP network stack */
1011 /*# of packets to network stack */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001012 data[i++] =
1013 CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001014 /*# of bytes to network stack */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001015 data[i++] =
1016 CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001017 /*# of packets dropped */
1018 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1019 oct_dev->droq[j]->stats.dropped_toomany +
1020 oct_dev->droq[j]->stats.rx_dropped);
1021 data[i++] =
1022 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1023 data[i++] =
1024 CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001025 data[i++] =
1026 CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001027
1028 /*control and data path*/
1029 data[i++] =
1030 CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1031 data[i++] =
1032 CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1033 data[i++] =
1034 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07001035
1036 data[i++] =
1037 CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001038 data[i++] =
1039 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001040 }
1041}
1042
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001043static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1044 struct ethtool_stats *stats
1045 __attribute__((unused)),
1046 u64 *data)
1047{
1048 struct net_device_stats *netstats = &netdev->stats;
1049 struct lio *lio = GET_LIO(netdev);
1050 struct octeon_device *oct_dev = lio->oct_dev;
1051 int i = 0, j, vj;
1052
1053 netdev->netdev_ops->ndo_get_stats(netdev);
1054 /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1055 data[i++] = CVM_CAST64(netstats->rx_packets);
1056 /* sum of oct->instr_queue[iq_no]->stats.tx_done */
1057 data[i++] = CVM_CAST64(netstats->tx_packets);
1058 /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1059 data[i++] = CVM_CAST64(netstats->rx_bytes);
1060 /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1061 data[i++] = CVM_CAST64(netstats->tx_bytes);
1062 data[i++] = CVM_CAST64(netstats->rx_errors);
1063 data[i++] = CVM_CAST64(netstats->tx_errors);
1064 /* sum of oct->droq[oq_no]->stats->rx_dropped +
1065 * oct->droq[oq_no]->stats->dropped_nodispatch +
1066 * oct->droq[oq_no]->stats->dropped_toomany +
1067 * oct->droq[oq_no]->stats->dropped_nomem
1068 */
1069 data[i++] = CVM_CAST64(netstats->rx_dropped);
1070 /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1071 data[i++] = CVM_CAST64(netstats->tx_dropped);
1072 /* lio->link_changes */
1073 data[i++] = CVM_CAST64(lio->link_changes);
1074
1075 for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
1076 j = lio->linfo.txpciq[vj].s.q_no;
1077
1078 /* packets to network port */
1079 /* # of packets tx to network */
1080 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1081 /* # of bytes tx to network */
1082 data[i++] = CVM_CAST64(
1083 oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1084 /* # of packets dropped */
1085 data[i++] = CVM_CAST64(
1086 oct_dev->instr_queue[j]->stats.tx_dropped);
1087 /* # of tx fails due to queue full */
1088 data[i++] = CVM_CAST64(
1089 oct_dev->instr_queue[j]->stats.tx_iq_busy);
1090 /* XXX gather entries sent */
1091 data[i++] = CVM_CAST64(
1092 oct_dev->instr_queue[j]->stats.sgentry_sent);
1093
1094 /* instruction to firmware: data and control */
1095 /* # of instructions to the queue */
1096 data[i++] = CVM_CAST64(
1097 oct_dev->instr_queue[j]->stats.instr_posted);
1098 /* # of instructions processed */
1099 data[i++] =
1100 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
1101 /* # of instructions could not be processed */
1102 data[i++] =
1103 CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1104 /* bytes sent through the queue */
1105 data[i++] = CVM_CAST64(
1106 oct_dev->instr_queue[j]->stats.bytes_sent);
1107 /* tso request */
1108 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1109 /* vxlan request */
1110 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1111 /* txq restart */
1112 data[i++] = CVM_CAST64(
1113 oct_dev->instr_queue[j]->stats.tx_restart);
1114 }
1115
1116 /* RX */
1117 for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
1118 j = lio->linfo.rxpciq[vj].s.q_no;
1119
1120 /* packets send to TCP/IP network stack */
1121 /* # of packets to network stack */
1122 data[i++] = CVM_CAST64(
1123 oct_dev->droq[j]->stats.rx_pkts_received);
1124 /* # of bytes to network stack */
1125 data[i++] = CVM_CAST64(
1126 oct_dev->droq[j]->stats.rx_bytes_received);
1127 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1128 oct_dev->droq[j]->stats.dropped_toomany +
1129 oct_dev->droq[j]->stats.rx_dropped);
1130 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1131 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1132 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1133
1134 /* control and data path */
1135 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1136 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1137 data[i++] =
1138 CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1139
1140 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1141 data[i++] =
1142 CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1143 }
1144}
1145
Raghu Vatsavayi30136392016-09-01 11:16:11 -07001146static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1147{
1148 struct octeon_device *oct_dev = lio->oct_dev;
1149 int i;
1150
1151 switch (oct_dev->chip_id) {
1152 case OCTEON_CN23XX_PF_VID:
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001153 case OCTEON_CN23XX_VF_VID:
Raghu Vatsavayi30136392016-09-01 11:16:11 -07001154 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1155 sprintf(data, "%s", oct_priv_flags_strings[i]);
1156 data += ETH_GSTRING_LEN;
1157 }
1158 break;
1159 case OCTEON_CN68XX:
1160 case OCTEON_CN66XX:
1161 break;
1162 default:
1163 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1164 break;
1165 }
1166}
1167
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001168static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1169{
1170 struct lio *lio = GET_LIO(netdev);
1171 struct octeon_device *oct_dev = lio->oct_dev;
1172 int num_iq_stats, num_oq_stats, i, j;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001173 int num_stats;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001174
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001175 switch (stringset) {
1176 case ETH_SS_STATS:
1177 num_stats = ARRAY_SIZE(oct_stats_strings);
1178 for (j = 0; j < num_stats; j++) {
1179 sprintf(data, "%s", oct_stats_strings[j]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001180 data += ETH_GSTRING_LEN;
1181 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001182
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001183 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1184 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
Raghu Vatsavayi763185a2016-11-14 15:54:45 -08001185 if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001186 continue;
1187 for (j = 0; j < num_iq_stats; j++) {
1188 sprintf(data, "tx-%d-%s", i,
1189 oct_iq_stats_strings[j]);
1190 data += ETH_GSTRING_LEN;
1191 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001192 }
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001193
1194 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001195 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
Raghu Vatsavayi763185a2016-11-14 15:54:45 -08001196 if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001197 continue;
1198 for (j = 0; j < num_oq_stats; j++) {
1199 sprintf(data, "rx-%d-%s", i,
1200 oct_droq_stats_strings[j]);
1201 data += ETH_GSTRING_LEN;
1202 }
1203 }
1204 break;
1205
Raghu Vatsavayi30136392016-09-01 11:16:11 -07001206 case ETH_SS_PRIV_FLAGS:
1207 lio_get_priv_flags_strings(lio, data);
1208 break;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001209 default:
1210 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1211 break;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001212 }
1213}
1214
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001215static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1216 u8 *data)
1217{
1218 int num_iq_stats, num_oq_stats, i, j;
1219 struct lio *lio = GET_LIO(netdev);
1220 struct octeon_device *oct_dev = lio->oct_dev;
1221 int num_stats;
1222
1223 switch (stringset) {
1224 case ETH_SS_STATS:
1225 num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1226 for (j = 0; j < num_stats; j++) {
1227 sprintf(data, "%s", oct_vf_stats_strings[j]);
1228 data += ETH_GSTRING_LEN;
1229 }
1230
1231 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1232 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1233 if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1234 continue;
1235 for (j = 0; j < num_iq_stats; j++) {
1236 sprintf(data, "tx-%d-%s", i,
1237 oct_iq_stats_strings[j]);
1238 data += ETH_GSTRING_LEN;
1239 }
1240 }
1241
1242 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1243 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1244 if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1245 continue;
1246 for (j = 0; j < num_oq_stats; j++) {
1247 sprintf(data, "rx-%d-%s", i,
1248 oct_droq_stats_strings[j]);
1249 data += ETH_GSTRING_LEN;
1250 }
1251 }
1252 break;
1253
1254 case ETH_SS_PRIV_FLAGS:
1255 lio_get_priv_flags_strings(lio, data);
1256 break;
1257 default:
1258 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1259 break;
1260 }
1261}
1262
Raghu Vatsavayi30136392016-09-01 11:16:11 -07001263static int lio_get_priv_flags_ss_count(struct lio *lio)
1264{
1265 struct octeon_device *oct_dev = lio->oct_dev;
1266
1267 switch (oct_dev->chip_id) {
1268 case OCTEON_CN23XX_PF_VID:
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001269 case OCTEON_CN23XX_VF_VID:
Raghu Vatsavayi30136392016-09-01 11:16:11 -07001270 return ARRAY_SIZE(oct_priv_flags_strings);
1271 case OCTEON_CN68XX:
1272 case OCTEON_CN66XX:
1273 return -EOPNOTSUPP;
1274 default:
1275 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1276 return -EOPNOTSUPP;
1277 }
1278}
1279
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001280static int lio_get_sset_count(struct net_device *netdev, int sset)
1281{
1282 struct lio *lio = GET_LIO(netdev);
1283 struct octeon_device *oct_dev = lio->oct_dev;
1284
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001285 switch (sset) {
1286 case ETH_SS_STATS:
1287 return (ARRAY_SIZE(oct_stats_strings) +
1288 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1289 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
Raghu Vatsavayi30136392016-09-01 11:16:11 -07001290 case ETH_SS_PRIV_FLAGS:
1291 return lio_get_priv_flags_ss_count(lio);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001292 default:
1293 return -EOPNOTSUPP;
1294 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001295}
1296
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001297static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1298{
1299 struct lio *lio = GET_LIO(netdev);
1300 struct octeon_device *oct_dev = lio->oct_dev;
1301
1302 switch (sset) {
1303 case ETH_SS_STATS:
1304 return (ARRAY_SIZE(oct_vf_stats_strings) +
1305 ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1306 ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1307 case ETH_SS_PRIV_FLAGS:
1308 return lio_get_priv_flags_ss_count(lio);
1309 default:
1310 return -EOPNOTSUPP;
1311 }
1312}
1313
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001314/* Callback function for intrmod */
1315static void octnet_intrmod_callback(struct octeon_device *oct_dev,
1316 u32 status,
1317 void *ptr)
1318{
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001319 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1320 struct oct_intrmod_context *ctx;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001321
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001322 ctx = (struct oct_intrmod_context *)sc->ctxptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001323
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001324 ctx->status = status;
1325
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001326 WRITE_ONCE(ctx->cond, 1);
1327
1328 /* This barrier is required to be sure that the response has been
1329 * written fully before waking up the handler
1330 */
1331 wmb();
1332
1333 wake_up_interruptible(&ctx->wc);
1334}
1335
1336/* get interrupt moderation parameters */
1337static int octnet_get_intrmod_cfg(struct lio *lio,
1338 struct oct_intrmod_cfg *intr_cfg)
1339{
1340 struct octeon_soft_command *sc;
1341 struct oct_intrmod_context *ctx;
1342 struct oct_intrmod_resp *resp;
1343 int retval;
1344 struct octeon_device *oct_dev = lio->oct_dev;
1345
1346 /* Alloc soft command */
1347 sc = (struct octeon_soft_command *)
1348 octeon_alloc_soft_command(oct_dev,
1349 0,
1350 sizeof(struct oct_intrmod_resp),
1351 sizeof(struct oct_intrmod_context));
1352
1353 if (!sc)
1354 return -ENOMEM;
1355
1356 resp = (struct oct_intrmod_resp *)sc->virtrptr;
1357 memset(resp, 0, sizeof(struct oct_intrmod_resp));
1358
1359 ctx = (struct oct_intrmod_context *)sc->ctxptr;
Dan Carpenter781159f2017-04-03 21:18:27 +03001360 memset(ctx, 0, sizeof(struct oct_intrmod_context));
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001361 WRITE_ONCE(ctx->cond, 0);
1362 ctx->octeon_id = lio_get_device_id(oct_dev);
1363 init_waitqueue_head(&ctx->wc);
1364
1365 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1366
1367 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1368 OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
1369
1370 sc->callback = octnet_intrmod_callback;
1371 sc->callback_arg = sc;
1372 sc->wait_time = 1000;
1373
1374 retval = octeon_send_soft_command(oct_dev, sc);
1375 if (retval == IQ_SEND_FAILED) {
1376 octeon_free_soft_command(oct_dev, sc);
1377 return -EINVAL;
1378 }
1379
1380 /* Sleep on a wait queue till the cond flag indicates that the
1381 * response arrived or timed-out.
1382 */
1383 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
1384 dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
1385 goto intrmod_info_wait_intr;
1386 }
1387
1388 retval = ctx->status || resp->status;
1389 if (retval) {
1390 dev_err(&oct_dev->pci_dev->dev,
1391 "Get interrupt moderation parameters failed\n");
1392 goto intrmod_info_wait_fail;
1393 }
1394
1395 octeon_swap_8B_data((u64 *)&resp->intrmod,
1396 (sizeof(struct oct_intrmod_cfg)) / 8);
1397 memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
1398 octeon_free_soft_command(oct_dev, sc);
1399
1400 return 0;
1401
1402intrmod_info_wait_fail:
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001403
1404 octeon_free_soft_command(oct_dev, sc);
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001405
1406intrmod_info_wait_intr:
1407
1408 return -ENODEV;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001409}
1410
1411/* Configure interrupt moderation parameters */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001412static int octnet_set_intrmod_cfg(struct lio *lio,
1413 struct oct_intrmod_cfg *intr_cfg)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001414{
1415 struct octeon_soft_command *sc;
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001416 struct oct_intrmod_context *ctx;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001417 struct oct_intrmod_cfg *cfg;
1418 int retval;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001419 struct octeon_device *oct_dev = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001420
1421 /* Alloc soft command */
1422 sc = (struct octeon_soft_command *)
1423 octeon_alloc_soft_command(oct_dev,
1424 sizeof(struct oct_intrmod_cfg),
1425 0,
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001426 sizeof(struct oct_intrmod_context));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001427
1428 if (!sc)
1429 return -ENOMEM;
1430
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001431 ctx = (struct oct_intrmod_context *)sc->ctxptr;
1432
1433 WRITE_ONCE(ctx->cond, 0);
1434 ctx->octeon_id = lio_get_device_id(oct_dev);
1435 init_waitqueue_head(&ctx->wc);
1436
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001437 cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
1438
1439 memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
1440 octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001441
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001442 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1443
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001444 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1445 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
1446
1447 sc->callback = octnet_intrmod_callback;
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001448 sc->callback_arg = sc;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001449 sc->wait_time = 1000;
1450
1451 retval = octeon_send_soft_command(oct_dev, sc);
Raghu Vatsavayiddc173a2016-06-14 16:54:43 -07001452 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001453 octeon_free_soft_command(oct_dev, sc);
1454 return -EINVAL;
1455 }
1456
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001457 /* Sleep on a wait queue till the cond flag indicates that the
1458 * response arrived or timed-out.
1459 */
1460 if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
1461 retval = ctx->status;
1462 if (retval)
1463 dev_err(&oct_dev->pci_dev->dev,
1464 "intrmod config failed. Status: %llx\n",
1465 CVM_CAST64(retval));
1466 else
1467 dev_info(&oct_dev->pci_dev->dev,
1468 "Rx-Adaptive Interrupt moderation %s\n",
1469 (intr_cfg->rx_enable) ?
1470 "enabled" : "disabled");
1471
1472 octeon_free_soft_command(oct_dev, sc);
1473
1474 return ((retval) ? -ENODEV : 0);
1475 }
1476
1477 dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");
1478
1479 return -EINTR;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001480}
1481
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07001482static void
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001483octnet_nic_stats_callback(struct octeon_device *oct_dev,
1484 u32 status, void *ptr)
1485{
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001486 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1487 struct oct_nic_stats_resp *resp =
1488 (struct oct_nic_stats_resp *)sc->virtrptr;
1489 struct oct_nic_stats_ctrl *ctrl =
1490 (struct oct_nic_stats_ctrl *)sc->ctxptr;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001491 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1492 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1493
1494 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1495 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1496
1497 if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1498 octeon_swap_8B_data((u64 *)&resp->stats,
1499 (sizeof(struct oct_link_stats)) >> 3);
1500
1501 /* RX link-level stats */
1502 rstats->total_rcvd = rsp_rstats->total_rcvd;
1503 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1504 rstats->total_bcst = rsp_rstats->total_bcst;
1505 rstats->total_mcst = rsp_rstats->total_mcst;
1506 rstats->runts = rsp_rstats->runts;
1507 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1508 /* Accounts for over/under-run of buffers */
1509 rstats->fifo_err = rsp_rstats->fifo_err;
1510 rstats->dmac_drop = rsp_rstats->dmac_drop;
1511 rstats->fcs_err = rsp_rstats->fcs_err;
1512 rstats->jabber_err = rsp_rstats->jabber_err;
1513 rstats->l2_err = rsp_rstats->l2_err;
1514 rstats->frame_err = rsp_rstats->frame_err;
1515
1516 /* RX firmware stats */
1517 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1518 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1519 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1520 rstats->fw_err_link = rsp_rstats->fw_err_link;
1521 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07001522 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1523 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1524
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001525 /* Number of packets that are LROed */
1526 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1527 /* Number of octets that are LROed */
1528 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1529 /* Number of LRO packets formed */
1530 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1531		/* Number of times LRO of packet aborted */
1532 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1533 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1534 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1535 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1536 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1537 /* intrmod: packet forward rate */
1538 rstats->fwd_rate = rsp_rstats->fwd_rate;
1539
1540 /* TX link-level stats */
1541 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1542 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1543 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1544 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1545 tstats->ctl_sent = rsp_tstats->ctl_sent;
1546		/* Packets sent after one collision */
1547		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1548		/* Packets sent after multiple collisions */
1549 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1550 /* Packets not sent due to max collisions */
1551 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1552 /* Packets not sent due to max deferrals */
1553 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1554 /* Accounts for over/under-run of buffers */
1555 tstats->fifo_err = rsp_tstats->fifo_err;
1556 tstats->runts = rsp_tstats->runts;
1557 /* Total number of collisions detected */
1558 tstats->total_collisions = rsp_tstats->total_collisions;
1559
1560 /* firmware stats */
1561 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1562 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1563 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1564 tstats->fw_err_link = rsp_tstats->fw_err_link;
1565 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1566 tstats->fw_tso = rsp_tstats->fw_tso;
1567 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1568 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07001569 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1570
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001571 resp->status = 1;
1572 } else {
1573 resp->status = -1;
1574 }
1575 complete(&ctrl->complete);
1576}
1577
1578/* Get link statistics from the NIC firmware */
1579static int octnet_get_link_stats(struct net_device *netdev)
1580{
1581 struct lio *lio = GET_LIO(netdev);
1582 struct octeon_device *oct_dev = lio->oct_dev;
1583
1584 struct octeon_soft_command *sc;
1585 struct oct_nic_stats_ctrl *ctrl;
1586 struct oct_nic_stats_resp *resp;
1587
1588 int retval;
1589
1590 /* Alloc soft command */
1591 sc = (struct octeon_soft_command *)
1592 octeon_alloc_soft_command(oct_dev,
1593 0,
1594 sizeof(struct oct_nic_stats_resp),
1595 sizeof(struct octnic_ctrl_pkt));
1596
1597 if (!sc)
1598 return -ENOMEM;
1599
1600 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1601 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1602
1603 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1604 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1605 ctrl->netdev = netdev;
1606 init_completion(&ctrl->complete);
1607
1608 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1609
1610 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1611 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1612
1613 sc->callback = octnet_nic_stats_callback;
1614 sc->callback_arg = sc;
1615	sc->wait_time = 500; /* in milliseconds */
1616
1617 retval = octeon_send_soft_command(oct_dev, sc);
1618 if (retval == IQ_SEND_FAILED) {
1619 octeon_free_soft_command(oct_dev, sc);
1620 return -EINVAL;
1621 }
1622
1623 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1624
1625 if (resp->status != 1) {
1626 octeon_free_soft_command(oct_dev, sc);
1627
1628 return -EINVAL;
1629 }
1630
1631 octeon_free_soft_command(oct_dev, sc);
1632
1633 return 0;
1634}
1635
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001636static int lio_get_intr_coalesce(struct net_device *netdev,
1637 struct ethtool_coalesce *intr_coal)
1638{
1639 struct lio *lio = GET_LIO(netdev);
1640 struct octeon_device *oct = lio->oct_dev;
1641 struct octeon_instr_queue *iq;
1642 struct oct_intrmod_cfg intrmod_cfg;
1643
1644 if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
1645 return -ENODEV;
1646
1647 switch (oct->chip_id) {
1648 case OCTEON_CN23XX_PF_VID:
1649 case OCTEON_CN23XX_VF_VID: {
1650 if (!intrmod_cfg.rx_enable) {
1651 intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
1652 intr_coal->rx_max_coalesced_frames =
1653 oct->rx_max_coalesced_frames;
1654 }
1655 if (!intrmod_cfg.tx_enable)
1656 intr_coal->tx_max_coalesced_frames =
1657 oct->tx_max_coalesced_frames;
1658 break;
1659 }
1660 case OCTEON_CN68XX:
1661 case OCTEON_CN66XX: {
1662 struct octeon_cn6xxx *cn6xxx =
1663 (struct octeon_cn6xxx *)oct->chip;
1664
1665 if (!intrmod_cfg.rx_enable) {
1666 intr_coal->rx_coalesce_usecs =
1667 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
1668 intr_coal->rx_max_coalesced_frames =
1669 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
1670 }
1671 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
1672 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
1673 break;
1674 }
1675 default:
1676 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1677 return -EINVAL;
1678 }
1679 if (intrmod_cfg.rx_enable) {
1680 intr_coal->use_adaptive_rx_coalesce =
1681 intrmod_cfg.rx_enable;
1682 intr_coal->rate_sample_interval =
1683 intrmod_cfg.check_intrvl;
1684 intr_coal->pkt_rate_high =
1685 intrmod_cfg.maxpkt_ratethr;
1686 intr_coal->pkt_rate_low =
1687 intrmod_cfg.minpkt_ratethr;
1688 intr_coal->rx_max_coalesced_frames_high =
1689 intrmod_cfg.rx_maxcnt_trigger;
1690 intr_coal->rx_coalesce_usecs_high =
1691 intrmod_cfg.rx_maxtmr_trigger;
1692 intr_coal->rx_coalesce_usecs_low =
1693 intrmod_cfg.rx_mintmr_trigger;
1694 intr_coal->rx_max_coalesced_frames_low =
1695 intrmod_cfg.rx_mincnt_trigger;
1696 }
1697 if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
1698 (intrmod_cfg.tx_enable)) {
1699 intr_coal->use_adaptive_tx_coalesce =
1700 intrmod_cfg.tx_enable;
1701 intr_coal->tx_max_coalesced_frames_high =
1702 intrmod_cfg.tx_maxcnt_trigger;
1703 intr_coal->tx_max_coalesced_frames_low =
1704 intrmod_cfg.tx_mincnt_trigger;
1705 }
1706 return 0;
1707}
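/*
 * For reference, a hedged userspace sketch (not part of this driver) of how
 * the values filled in above surface through the standard SIOCETHTOOL path,
 * i.e. roughly what "ethtool -c <ifname>" does under the hood.  "eth0" is
 * only a placeholder interface name.
 */
#if 0	/* userspace illustration only, not compiled */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ec;
	/* The kernel routes this ioctl to the driver's .get_coalesce hook. */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-usecs %u rx-frames %u adaptive-rx %u\n",
		       ec.rx_coalesce_usecs, ec.rx_max_coalesced_frames,
		       ec.use_adaptive_rx_coalesce);
	return 0;
}
#endif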
1708
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001709/* Enable/disable adaptive interrupt moderation */
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001710static int oct_cfg_adaptive_intr(struct lio *lio,
1711 struct oct_intrmod_cfg *intrmod_cfg,
1712 struct ethtool_coalesce *intr_coal)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001713{
1714 int ret = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001715
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001716 if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
1717 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
1718 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
1719 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001720 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001721 if (intrmod_cfg->rx_enable) {
1722 intrmod_cfg->rx_maxcnt_trigger =
1723 intr_coal->rx_max_coalesced_frames_high;
1724 intrmod_cfg->rx_maxtmr_trigger =
1725 intr_coal->rx_coalesce_usecs_high;
1726 intrmod_cfg->rx_mintmr_trigger =
1727 intr_coal->rx_coalesce_usecs_low;
1728 intrmod_cfg->rx_mincnt_trigger =
1729 intr_coal->rx_max_coalesced_frames_low;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001730 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001731 if (intrmod_cfg->tx_enable) {
1732 intrmod_cfg->tx_maxcnt_trigger =
1733 intr_coal->tx_max_coalesced_frames_high;
1734 intrmod_cfg->tx_mincnt_trigger =
1735 intr_coal->tx_max_coalesced_frames_low;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001736 }
1737
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001738 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001739
1740 return ret;
1741}
1742
1743static int
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001744oct_cfg_rx_intrcnt(struct lio *lio,
1745 struct oct_intrmod_cfg *intrmod,
1746 struct ethtool_coalesce *intr_coal)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001747{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001748 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001749 u32 rx_max_coalesced_frames;
1750
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001751 /* Config Cnt based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001752 switch (oct->chip_id) {
1753 case OCTEON_CN68XX:
1754 case OCTEON_CN66XX: {
1755 struct octeon_cn6xxx *cn6xxx =
1756 (struct octeon_cn6xxx *)oct->chip;
1757
1758 if (!intr_coal->rx_max_coalesced_frames)
1759 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1760 else
1761 rx_max_coalesced_frames =
1762 intr_coal->rx_max_coalesced_frames;
1763 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1764 rx_max_coalesced_frames);
1765 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1766 break;
1767 }
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001768 case OCTEON_CN23XX_PF_VID: {
1769 int q_no;
1770
1771 if (!intr_coal->rx_max_coalesced_frames)
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001772 rx_max_coalesced_frames = intrmod->rx_frames;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001773 else
1774 rx_max_coalesced_frames =
1775 intr_coal->rx_max_coalesced_frames;
1776 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1777 q_no += oct->sriov_info.pf_srn;
1778 octeon_write_csr64(
1779 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1780 (octeon_read_csr64(
1781 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1782 (0x3fffff00000000UL)) |
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001783 (rx_max_coalesced_frames - 1));
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001784 /*consider setting resend bit*/
1785 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001786 intrmod->rx_frames = rx_max_coalesced_frames;
1787 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001788 break;
1789 }
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001790 case OCTEON_CN23XX_VF_VID: {
1791 int q_no;
1792
1793 if (!intr_coal->rx_max_coalesced_frames)
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001794 rx_max_coalesced_frames = intrmod->rx_frames;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001795 else
1796 rx_max_coalesced_frames =
1797 intr_coal->rx_max_coalesced_frames;
1798 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1799 octeon_write_csr64(
1800 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
1801 (octeon_read_csr64(
1802 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1803 (0x3fffff00000000UL)) |
1804 rx_max_coalesced_frames);
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001805 /*consider writing to resend bit here*/
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001806 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001807 intrmod->rx_frames = rx_max_coalesced_frames;
1808 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001809 break;
1810 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001811 default:
1812 return -EINVAL;
1813 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001814 return 0;
1815}
1816
Raghu Vatsavayi32581242016-08-31 11:03:20 -07001817static int oct_cfg_rx_intrtime(struct lio *lio,
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001818 struct oct_intrmod_cfg *intrmod,
Raghu Vatsavayi32581242016-08-31 11:03:20 -07001819 struct ethtool_coalesce *intr_coal)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001820{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001821 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001822 u32 time_threshold, rx_coalesce_usecs;
1823
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001824 /* Config Time based interrupt values */
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001825 switch (oct->chip_id) {
1826 case OCTEON_CN68XX:
1827 case OCTEON_CN66XX: {
1828 struct octeon_cn6xxx *cn6xxx =
1829 (struct octeon_cn6xxx *)oct->chip;
1830 if (!intr_coal->rx_coalesce_usecs)
1831 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1832 else
1833 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001834
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001835 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1836 rx_coalesce_usecs);
1837 octeon_write_csr(oct,
1838 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1839 time_threshold);
1840
1841 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1842 break;
1843 }
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001844 case OCTEON_CN23XX_PF_VID: {
1845 u64 time_threshold;
1846 int q_no;
1847
1848 if (!intr_coal->rx_coalesce_usecs)
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001849 rx_coalesce_usecs = intrmod->rx_usecs;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001850 else
1851 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1852 time_threshold =
1853 cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1854 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1855 q_no += oct->sriov_info.pf_srn;
1856 octeon_write_csr64(oct,
1857 CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001858 (intrmod->rx_frames |
1859 ((u64)time_threshold << 32)));
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001860 /*consider writing to resend bit here*/
1861 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001862 intrmod->rx_usecs = rx_coalesce_usecs;
1863 oct->rx_coalesce_usecs = rx_coalesce_usecs;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001864 break;
1865 }
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001866 case OCTEON_CN23XX_VF_VID: {
1867 u64 time_threshold;
1868 int q_no;
1869
1870 if (!intr_coal->rx_coalesce_usecs)
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001871 rx_coalesce_usecs = intrmod->rx_usecs;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001872 else
1873 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1874
1875 time_threshold =
1876 cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1877 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1878 octeon_write_csr64(
1879 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001880 (intrmod->rx_frames |
1881 ((u64)time_threshold << 32)));
1882 /*consider setting resend bit*/
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001883 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001884 intrmod->rx_usecs = rx_coalesce_usecs;
1885 oct->rx_coalesce_usecs = rx_coalesce_usecs;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001886 break;
1887 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001888 default:
1889 return -EINVAL;
1890 }
1891
1892 return 0;
1893}
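/*
 * The CN23XX writes above pack both coalescing triggers into a single
 * SLI_OQ_PKT_INT_LEVELS CSR: the packet-count threshold in the low 32 bits
 * and the time threshold in the high 32 bits.  A hedged helper sketch of
 * that packing (the field split is taken from the expressions above, not
 * from a hardware manual):
 */
#if 0	/* illustrative sketch only, not compiled */
static inline u64 example_pack_oq_int_levels(u32 pkt_thresh, u32 time_thresh)
{
	/* low word: packet count trigger, high word: time trigger */
	return (u64)pkt_thresh | ((u64)time_thresh << 32);
}
#endif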
1894
1895static int
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001896oct_cfg_tx_intrcnt(struct lio *lio,
1897 struct oct_intrmod_cfg *intrmod,
1898 struct ethtool_coalesce *intr_coal)
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001899{
1900 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001901 u32 iq_intr_pkt;
1902 void __iomem *inst_cnt_reg;
1903 u64 val;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001904
1905 /* Config Cnt based interrupt values */
1906 switch (oct->chip_id) {
1907 case OCTEON_CN68XX:
1908 case OCTEON_CN66XX:
1909 break;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001910 case OCTEON_CN23XX_VF_VID:
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001911 case OCTEON_CN23XX_PF_VID: {
1912 int q_no;
1913
1914 if (!intr_coal->tx_max_coalesced_frames)
1915 iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
1916 CN23XX_PKT_IN_DONE_WMARK_MASK;
1917 else
1918 iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
1919 CN23XX_PKT_IN_DONE_WMARK_MASK;
1920 for (q_no = 0; q_no < oct->num_iqs; q_no++) {
1921 inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
1922 val = readq(inst_cnt_reg);
1923			/* clear wmark and count; don't want to write count back */
1924 val = (val & 0xFFFF000000000000ULL) |
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001925 ((u64)(iq_intr_pkt - 1)
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001926 << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
1927 writeq(val, inst_cnt_reg);
1928 /*consider setting resend bit*/
1929 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001930 intrmod->tx_frames = iq_intr_pkt;
1931 oct->tx_max_coalesced_frames = iq_intr_pkt;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001932 break;
1933 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001934 default:
1935 return -EINVAL;
1936 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001937 return 0;
1938}
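/*
 * A hedged sketch of the watermark update performed above: preserve the top
 * 16 bits of the instruction-count CSR and place the new threshold (minus
 * one) at CN23XX_PKT_IN_DONE_WMARK_BIT_POS, using the same constants as the
 * code above.
 */
#if 0	/* illustrative sketch only, not compiled */
static inline u64 example_update_iq_wmark(u64 old_val, u32 iq_intr_pkt)
{
	return (old_val & 0xFFFF000000000000ULL) |
	       ((u64)(iq_intr_pkt - 1) << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
}
#endif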
1939
1940static int lio_set_intr_coalesce(struct net_device *netdev,
1941 struct ethtool_coalesce *intr_coal)
1942{
1943 struct lio *lio = GET_LIO(netdev);
1944 int ret;
1945 struct octeon_device *oct = lio->oct_dev;
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001946 struct oct_intrmod_cfg intrmod = {0};
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001947 u32 j, q_no;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001948 int db_max, db_min;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001949
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001950 switch (oct->chip_id) {
1951 case OCTEON_CN68XX:
1952 case OCTEON_CN66XX:
1953 db_min = CN6XXX_DB_MIN;
1954 db_max = CN6XXX_DB_MAX;
1955 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1956 (intr_coal->tx_max_coalesced_frames <= db_max)) {
1957 for (j = 0; j < lio->linfo.num_txpciq; j++) {
1958 q_no = lio->linfo.txpciq[j].s.q_no;
1959 oct->instr_queue[q_no]->fill_threshold =
1960 intr_coal->tx_max_coalesced_frames;
1961 }
1962 } else {
1963 dev_err(&oct->pci_dev->dev,
1964 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001965 intr_coal->tx_max_coalesced_frames,
1966 db_min, db_max);
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001967 return -EINVAL;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001968 }
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001969 break;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001970 case OCTEON_CN23XX_PF_VID:
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08001971 case OCTEON_CN23XX_VF_VID:
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07001972 break;
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001973 default:
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001974 return -EINVAL;
1975 }
1976
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001977 intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1978 intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
1979 intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
1980 intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
1981 intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001982
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001983 ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001984
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07001985 if (!intr_coal->use_adaptive_rx_coalesce) {
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001986 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001987 if (ret)
1988 goto ret_intrmod;
1989
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001990 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001991 if (ret)
1992 goto ret_intrmod;
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001993 } else {
1994 oct->rx_coalesce_usecs =
1995 CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
1996 oct->rx_max_coalesced_frames =
1997 CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001998 }
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07001999
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07002000 if (!intr_coal->use_adaptive_tx_coalesce) {
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07002001 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07002002 if (ret)
2003 goto ret_intrmod;
Prasad Kanneganti50c0add2017-03-28 12:14:06 -07002004 } else {
2005 oct->tx_max_coalesced_frames =
2006 CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
Raghu Vatsavayi78e6a9b2016-06-21 22:53:10 -07002007 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002008
2009 return 0;
2010ret_intrmod:
2011 return ret;
2012}
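/*
 * Usage note (a hedged example; "eth0" is only a placeholder interface
 * name): the handler above is what runs when interrupt coalescing is tuned
 * from userspace with the standard ethtool CLI, e.g.
 *
 *   ethtool -C eth0 adaptive-rx on adaptive-tx on
 *   ethtool -C eth0 adaptive-rx off rx-usecs 64 rx-frames 64 tx-frames 64
 *
 * The adaptive on/off switches map to intrmod.rx_enable/tx_enable, and the
 * fixed values flow through the oct_cfg_rx_intrtime()/oct_cfg_rx_intrcnt()/
 * oct_cfg_tx_intrcnt() paths above.
 */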
2013
2014static int lio_get_ts_info(struct net_device *netdev,
2015 struct ethtool_ts_info *info)
2016{
2017 struct lio *lio = GET_LIO(netdev);
2018
2019 info->so_timestamping =
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07002020#ifdef PTP_HARDWARE_TIMESTAMPING
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002021 SOF_TIMESTAMPING_TX_HARDWARE |
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002022 SOF_TIMESTAMPING_RX_HARDWARE |
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07002023 SOF_TIMESTAMPING_RAW_HARDWARE |
2024 SOF_TIMESTAMPING_TX_SOFTWARE |
2025#endif
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002026 SOF_TIMESTAMPING_RX_SOFTWARE |
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07002027 SOF_TIMESTAMPING_SOFTWARE;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002028
2029 if (lio->ptp_clock)
2030 info->phc_index = ptp_clock_index(lio->ptp_clock);
2031 else
2032 info->phc_index = -1;
2033
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07002034#ifdef PTP_HARDWARE_TIMESTAMPING
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002035 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2036
2037 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2038 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2039 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2040 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
Raghu Vatsavayi178cc102016-06-21 22:53:13 -07002041#endif
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002042
2043 return 0;
2044}
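/*
 * Usage note (hedged example; "eth0" is a placeholder): the capabilities
 * reported above are what "ethtool -T eth0" prints, and the tx_types/
 * rx_filters advertised under PTP_HARDWARE_TIMESTAMPING are the modes a PTP
 * daemon would request through the SIOCSHWTSTAMP ioctl.
 */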
2045
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002046/* Return register dump len. */
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07002047static int lio_get_regs_len(struct net_device *dev)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002048{
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07002049 struct lio *lio = GET_LIO(dev);
2050 struct octeon_device *oct = lio->oct_dev;
2051
2052 switch (oct->chip_id) {
2053 case OCTEON_CN23XX_PF_VID:
2054 return OCT_ETHTOOL_REGDUMP_LEN_23XX;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08002055 case OCTEON_CN23XX_VF_VID:
2056 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07002057 default:
2058 return OCT_ETHTOOL_REGDUMP_LEN;
2059 }
2060}
2061
2062static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2063{
2064 u32 reg;
2065 u8 pf_num = oct->pf_num;
2066 int len = 0;
2067 int i;
2068
2069 /* PCI Window Registers */
2070
2071 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2072
2073 /*0x29030 or 0x29040*/
2074 reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2075 len += sprintf(s + len,
2076 "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2077 reg, oct->pcie_port, oct->pf_num,
2078 (u64)octeon_read_csr64(oct, reg));
2079
2080 /*0x27080 or 0x27090*/
2081 reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2082 len +=
2083 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2084 reg, oct->pcie_port, oct->pf_num,
2085 (u64)octeon_read_csr64(oct, reg));
2086
2087 /*0x27000 or 0x27010*/
2088 reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2089 len +=
2090 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2091 reg, oct->pcie_port, oct->pf_num,
2092 (u64)octeon_read_csr64(oct, reg));
2093
2094 /*0x29120*/
2095 reg = 0x29120;
2096 len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2097 (u64)octeon_read_csr64(oct, reg));
2098
2099 /*0x27300*/
2100 reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2101 (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2102 len += sprintf(
2103 s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2104 oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2105
2106 /*0x27200*/
2107 reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2108 (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2109 len += sprintf(s + len,
2110 "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2111 reg, oct->pcie_port, oct->pf_num,
2112 (u64)octeon_read_csr64(oct, reg));
2113
2114	/*0x29130*/
2115 reg = CN23XX_SLI_PKT_CNT_INT;
2116 len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2117 (u64)octeon_read_csr64(oct, reg));
2118
2119 /*0x29140*/
2120 reg = CN23XX_SLI_PKT_TIME_INT;
2121 len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2122 (u64)octeon_read_csr64(oct, reg));
2123
2124 /*0x29160*/
2125 reg = 0x29160;
2126 len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2127 (u64)octeon_read_csr64(oct, reg));
2128
2129 /*0x29180*/
2130 reg = CN23XX_SLI_OQ_WMARK;
2131 len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2132 reg, (u64)octeon_read_csr64(oct, reg));
2133
2134 /*0x291E0*/
2135 reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2136 len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2137 (u64)octeon_read_csr64(oct, reg));
2138
2139 /*0x29210*/
2140 reg = CN23XX_SLI_GBL_CONTROL;
2141 len += sprintf(s + len,
2142 "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2143 (u64)octeon_read_csr64(oct, reg));
2144
2145 /*0x29220*/
2146 reg = 0x29220;
2147 len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2148 reg, (u64)octeon_read_csr64(oct, reg));
2149
2150 /*PF only*/
2151 if (pf_num == 0) {
2152 /*0x29260*/
2153 reg = CN23XX_SLI_OUT_BP_EN_W1S;
2154 len += sprintf(s + len,
2155 "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
2156 reg, (u64)octeon_read_csr64(oct, reg));
2157 } else if (pf_num == 1) {
2158 /*0x29270*/
2159 reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2160 len += sprintf(s + len,
2161 "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2162 reg, (u64)octeon_read_csr64(oct, reg));
2163 }
2164
2165 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2166 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2167 len +=
2168 sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2169 reg, i, (u64)octeon_read_csr64(oct, reg));
2170 }
2171
2172 /*0x10040*/
2173 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2174 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2175 len += sprintf(s + len,
2176 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2177 reg, i, (u64)octeon_read_csr64(oct, reg));
2178 }
2179
2180 /*0x10080*/
2181 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2182 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2183 len += sprintf(s + len,
2184 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2185 reg, i, (u64)octeon_read_csr64(oct, reg));
2186 }
2187
2188 /*0x10090*/
2189 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2190 reg = CN23XX_SLI_OQ_SIZE(i);
2191 len += sprintf(
2192 s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2193 reg, i, (u64)octeon_read_csr64(oct, reg));
2194 }
2195
2196 /*0x10050*/
2197 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2198 reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2199 len += sprintf(
2200 s + len,
2201 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2202 reg, i, (u64)octeon_read_csr64(oct, reg));
2203 }
2204
2205 /*0x10070*/
2206 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2207 reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2208 len += sprintf(s + len,
2209 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2210 reg, i, (u64)octeon_read_csr64(oct, reg));
2211 }
2212
2213 /*0x100a0*/
2214 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2215 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2216 len += sprintf(s + len,
2217 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2218 reg, i, (u64)octeon_read_csr64(oct, reg));
2219 }
2220
2221 /*0x100b0*/
2222 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2223 reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2224 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2225 reg, i, (u64)octeon_read_csr64(oct, reg));
2226 }
2227
2228 /*0x100c0*/
2229 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2230 reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2231 len += sprintf(s + len,
2232 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2233 reg, i, (u64)octeon_read_csr64(oct, reg));
2234
2235 /*0x10000*/
2236 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2237 reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2238 len += sprintf(
2239 s + len,
2240 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2241 reg, i, (u64)octeon_read_csr64(oct, reg));
2242 }
2243
2244 /*0x10010*/
2245 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2246 reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2247 len += sprintf(
2248 s + len,
2249 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
2250 i, (u64)octeon_read_csr64(oct, reg));
2251 }
2252
2253 /*0x10020*/
2254 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2255 reg = CN23XX_SLI_IQ_DOORBELL(i);
2256 len += sprintf(
2257 s + len,
2258 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2259 reg, i, (u64)octeon_read_csr64(oct, reg));
2260 }
2261
2262 /*0x10030*/
2263 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2264 reg = CN23XX_SLI_IQ_SIZE(i);
2265 len += sprintf(
2266 s + len,
2267 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2268 reg, i, (u64)octeon_read_csr64(oct, reg));
2269 }
2270
2271 /*0x10040*/
2272		for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2273			reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2274			len += sprintf(s + len,
2275				       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2276				       reg, i, (u64)octeon_read_csr64(oct, reg));
		}
2277 }
2278
2279 return len;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002280}
2281
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08002282static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2283{
2284 int len = 0;
2285 u32 reg;
2286 int i;
2287
2288 /* PCI Window Registers */
2289
2290 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2291
2292 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2293 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2294 len += sprintf(s + len,
2295 "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2296 reg, i, (u64)octeon_read_csr64(oct, reg));
2297 }
2298
2299 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2300 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2301 len += sprintf(s + len,
2302 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2303 reg, i, (u64)octeon_read_csr64(oct, reg));
2304 }
2305
2306 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2307 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2308 len += sprintf(s + len,
2309 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2310 reg, i, (u64)octeon_read_csr64(oct, reg));
2311 }
2312
2313 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2314 reg = CN23XX_VF_SLI_OQ_SIZE(i);
2315 len += sprintf(s + len,
2316 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2317 reg, i, (u64)octeon_read_csr64(oct, reg));
2318 }
2319
2320 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2321 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2322 len += sprintf(s + len,
2323 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2324 reg, i, (u64)octeon_read_csr64(oct, reg));
2325 }
2326
2327 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2328 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2329 len += sprintf(s + len,
2330 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2331 reg, i, (u64)octeon_read_csr64(oct, reg));
2332 }
2333
2334 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2335 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2336 len += sprintf(s + len,
2337 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2338 reg, i, (u64)octeon_read_csr64(oct, reg));
2339 }
2340
2341 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2342 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2343 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2344 reg, i, (u64)octeon_read_csr64(oct, reg));
2345 }
2346
2347 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2348 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2349 len += sprintf(s + len,
2350 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2351 reg, i, (u64)octeon_read_csr64(oct, reg));
2352 }
2353
2354 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2355 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2356 len += sprintf(s + len,
2357 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2358 reg, i, (u64)octeon_read_csr64(oct, reg));
2359 }
2360
2361 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2362 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2363 len += sprintf(s + len,
2364 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2365 reg, i, (u64)octeon_read_csr64(oct, reg));
2366 }
2367
2368 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2369 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2370 len += sprintf(s + len,
2371 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2372 reg, i, (u64)octeon_read_csr64(oct, reg));
2373 }
2374
2375 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2376 reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2377 len += sprintf(s + len,
2378 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2379 reg, i, (u64)octeon_read_csr64(oct, reg));
2380 }
2381
2382 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2383 reg = CN23XX_VF_SLI_IQ_SIZE(i);
2384 len += sprintf(s + len,
2385 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2386 reg, i, (u64)octeon_read_csr64(oct, reg));
2387 }
2388
2389 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2390 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2391 len += sprintf(s + len,
2392 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2393 reg, i, (u64)octeon_read_csr64(oct, reg));
2394 }
2395
2396 return len;
2397}
2398
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002399static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2400{
2401 u32 reg;
2402 int i, len = 0;
2403
2404 /* PCI Window Registers */
2405
2406 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2407 reg = CN6XXX_WIN_WR_ADDR_LO;
2408 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2409 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2410 reg = CN6XXX_WIN_WR_ADDR_HI;
2411 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2412 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2413 reg = CN6XXX_WIN_RD_ADDR_LO;
2414 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2415 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2416 reg = CN6XXX_WIN_RD_ADDR_HI;
2417 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2418 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2419 reg = CN6XXX_WIN_WR_DATA_LO;
2420 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2421 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2422 reg = CN6XXX_WIN_WR_DATA_HI;
2423 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2424 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2425 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2426 CN6XXX_WIN_WR_MASK_REG,
2427 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2428
2429 /* PCI Interrupt Register */
2430 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2431 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2432 CN6XXX_SLI_INT_ENB64_PORT0));
2433 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2434 CN6XXX_SLI_INT_ENB64_PORT1,
2435 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2436 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2437 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2438
2439 /* PCI Output queue registers */
2440 for (i = 0; i < oct->num_oqs; i++) {
2441 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2442 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2443 reg, i, octeon_read_csr(oct, reg));
2444 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2445 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2446 reg, i, octeon_read_csr(oct, reg));
2447 }
2448 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2449 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2450 reg, octeon_read_csr(oct, reg));
2451 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2452 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2453 reg, octeon_read_csr(oct, reg));
2454
2455 /* PCI Input queue registers */
2456 for (i = 0; i <= 3; i++) {
2457 u32 reg;
2458
2459 reg = CN6XXX_SLI_IQ_DOORBELL(i);
2460 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2461 reg, i, octeon_read_csr(oct, reg));
2462 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2463 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2464 reg, i, octeon_read_csr(oct, reg));
2465 }
2466
2467 /* PCI DMA registers */
2468
2469 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2470 CN6XXX_DMA_CNT(0),
2471 octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2472 reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2473 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2474 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2475 reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2476 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2477 CN6XXX_DMA_TIME_INT_LEVEL(0),
2478 octeon_read_csr(oct, reg));
2479
2480 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2481 CN6XXX_DMA_CNT(1),
2482 octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2483 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2484 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2485 CN6XXX_DMA_PKT_INT_LEVEL(1),
2486 octeon_read_csr(oct, reg));
2487 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2488 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2489 CN6XXX_DMA_TIME_INT_LEVEL(1),
2490 octeon_read_csr(oct, reg));
2491
2492 /* PCI Index registers */
2493
2494 len += sprintf(s + len, "\n");
2495
2496 for (i = 0; i < 16; i++) {
2497 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2498 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2499 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2500 }
2501
2502 return len;
2503}
2504
2505static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2506{
2507 u32 val;
2508 int i, len = 0;
2509
2510 /* PCI CONFIG Registers */
2511
2512 len += sprintf(s + len,
2513 "\n\t Octeon Config space Registers\n\n");
2514
2515 for (i = 0; i <= 13; i++) {
2516 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2517 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2518 (i * 4), i, val);
2519 }
2520
2521 for (i = 30; i <= 34; i++) {
2522 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2523 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2524 (i * 4), i, val);
2525 }
2526
2527 return len;
2528}
2529
2530/* Return register dump to user app. */
2531static void lio_get_regs(struct net_device *dev,
2532 struct ethtool_regs *regs, void *regbuf)
2533{
2534 struct lio *lio = GET_LIO(dev);
2535 int len = 0;
2536 struct octeon_device *oct = lio->oct_dev;
2537
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002538 regs->version = OCT_ETHTOOL_REGSVER;
2539
2540 switch (oct->chip_id) {
Raghu Vatsavayidc3abcb2016-09-01 11:16:08 -07002541 case OCTEON_CN23XX_PF_VID:
2542 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2543 len += cn23xx_read_csr_reg(regbuf + len, oct);
2544 break;
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08002545 case OCTEON_CN23XX_VF_VID:
2546 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
2547 len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
2548 break;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002549 case OCTEON_CN68XX:
2550 case OCTEON_CN66XX:
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07002551 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002552 len += cn6xxx_read_csr_reg(regbuf + len, oct);
2553 len += cn6xxx_read_config_reg(regbuf + len, oct);
2554 break;
2555 default:
2556 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
2557 __func__, oct->chip_id);
2558 }
2559}
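/*
 * Usage note (hedged example; "eth0" is a placeholder): the dump built above
 * is plain text produced with sprintf(), so it can be read directly with
 * "ethtool -d eth0 raw on", which emits the buffer as-is instead of
 * hex-decoding it.
 */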
2560
Raghu Vatsavayif5a20472016-06-21 22:53:14 -07002561static u32 lio_get_priv_flags(struct net_device *netdev)
2562{
2563 struct lio *lio = GET_LIO(netdev);
2564
2565 return lio->oct_dev->priv_flags;
2566}
2567
2568static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2569{
2570 struct lio *lio = GET_LIO(netdev);
2571 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
2572
2573 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
2574 intr_by_tx_bytes);
2575 return 0;
2576}
2577
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002578static const struct ethtool_ops lio_ethtool_ops = {
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08002579 .get_link_ksettings = lio_get_link_ksettings,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002580 .get_link = ethtool_op_get_link,
2581 .get_drvinfo = lio_get_drvinfo,
2582 .get_ringparam = lio_ethtool_get_ringparam,
2583 .get_channels = lio_ethtool_get_channels,
2584 .set_phys_id = lio_set_phys_id,
2585 .get_eeprom_len = lio_get_eeprom_len,
2586 .get_eeprom = lio_get_eeprom,
2587 .get_strings = lio_get_strings,
2588 .get_ethtool_stats = lio_get_ethtool_stats,
2589 .get_pauseparam = lio_get_pauseparam,
Raghu Vatsavayi30136392016-09-01 11:16:11 -07002590 .set_pauseparam = lio_set_pauseparam,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002591 .get_regs_len = lio_get_regs_len,
2592 .get_regs = lio_get_regs,
2593 .get_msglevel = lio_get_msglevel,
2594 .set_msglevel = lio_set_msglevel,
2595 .get_sset_count = lio_get_sset_count,
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08002596 .get_coalesce = lio_get_intr_coalesce,
2597 .set_coalesce = lio_set_intr_coalesce,
2598 .get_priv_flags = lio_get_priv_flags,
2599 .set_priv_flags = lio_set_priv_flags,
2600 .get_ts_info = lio_get_ts_info,
2601};
2602
2603static const struct ethtool_ops lio_vf_ethtool_ops = {
2604 .get_link_ksettings = lio_get_link_ksettings,
2605 .get_link = ethtool_op_get_link,
2606 .get_drvinfo = lio_get_vf_drvinfo,
2607 .get_ringparam = lio_ethtool_get_ringparam,
2608 .get_channels = lio_ethtool_get_channels,
2609 .get_strings = lio_vf_get_strings,
2610 .get_ethtool_stats = lio_vf_get_ethtool_stats,
2611 .get_regs_len = lio_get_regs_len,
2612 .get_regs = lio_get_regs,
2613 .get_msglevel = lio_get_msglevel,
2614 .set_msglevel = lio_set_msglevel,
2615 .get_sset_count = lio_vf_get_sset_count,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002616 .get_coalesce = lio_get_intr_coalesce,
2617 .set_coalesce = lio_set_intr_coalesce,
Raghu Vatsavayif5a20472016-06-21 22:53:14 -07002618 .get_priv_flags = lio_get_priv_flags,
2619 .set_priv_flags = lio_set_priv_flags,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002620 .get_ts_info = lio_get_ts_info,
2621};
2622
2623void liquidio_set_ethtool_ops(struct net_device *netdev)
2624{
Raghu Vatsavayid8ab8482016-12-08 13:00:46 -08002625 struct lio *lio = GET_LIO(netdev);
2626 struct octeon_device *oct = lio->oct_dev;
2627
2628 if (OCTEON_CN23XX_VF(oct))
2629 netdev->ethtool_ops = &lio_vf_ethtool_ops;
2630 else
2631 netdev->ethtool_ops = &lio_ethtool_ops;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002632}