blob: 84cd4ca43c04a4bd21aa6a9e29f9082fa73e57f0 [file] [log] [blame]
Michael Chanc0c050c2015-10-22 16:01:17 -04001/* Broadcom NetXtreme-C/E network driver.
2 *
Michael Chan11f15ed2016-04-05 14:08:55 -04003 * Copyright (c) 2014-2016 Broadcom Corporation
Michael Chan8e202362017-04-04 18:14:09 -04004 * Copyright (c) 2016-2017 Broadcom Limited
Michael Chanc0c050c2015-10-22 16:01:17 -04005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
Rob Swindell3ebf6f02016-02-26 04:00:06 -050011#include <linux/ctype.h>
Michael Chan8ddc9aa2016-03-07 15:38:47 -050012#include <linux/stringify.h>
Michael Chanc0c050c2015-10-22 16:01:17 -040013#include <linux/ethtool.h>
14#include <linux/interrupt.h>
15#include <linux/pci.h>
16#include <linux/etherdevice.h>
17#include <linux/crc32.h>
18#include <linux/firmware.h>
19#include "bnxt_hsi.h"
20#include "bnxt.h"
21#include "bnxt_ethtool.h"
22#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
23#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
24#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
Rob Swindell5ac67d82016-09-19 03:58:03 -040025#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
26#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
Michael Chanc0c050c2015-10-22 16:01:17 -040027
Rob Swindell3ebf6f02016-02-26 04:00:06 -050028static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
29
Michael Chanc0c050c2015-10-22 16:01:17 -040030static u32 bnxt_get_msglevel(struct net_device *dev)
31{
32 struct bnxt *bp = netdev_priv(dev);
33
34 return bp->msg_enable;
35}
36
37static void bnxt_set_msglevel(struct net_device *dev, u32 value)
38{
39 struct bnxt *bp = netdev_priv(dev);
40
41 bp->msg_enable = value;
42}
43
44static int bnxt_get_coalesce(struct net_device *dev,
45 struct ethtool_coalesce *coal)
46{
47 struct bnxt *bp = netdev_priv(dev);
48
49 memset(coal, 0, sizeof(*coal));
50
Michael Chandfb5b892016-02-26 04:00:01 -050051 coal->rx_coalesce_usecs = bp->rx_coal_ticks;
52 /* 2 completion records per rx packet */
53 coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
54 coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
55 coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
Michael Chanc0c050c2015-10-22 16:01:17 -040056
Michael Chandfc9c942016-02-26 04:00:03 -050057 coal->tx_coalesce_usecs = bp->tx_coal_ticks;
58 coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
59 coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
60 coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
61
Michael Chan51f30782016-07-01 18:46:29 -040062 coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
63
Michael Chanc0c050c2015-10-22 16:01:17 -040064 return 0;
65}
66
67static int bnxt_set_coalesce(struct net_device *dev,
68 struct ethtool_coalesce *coal)
69{
70 struct bnxt *bp = netdev_priv(dev);
Michael Chan51f30782016-07-01 18:46:29 -040071 bool update_stats = false;
Michael Chanc0c050c2015-10-22 16:01:17 -040072 int rc = 0;
73
Michael Chandfb5b892016-02-26 04:00:01 -050074 bp->rx_coal_ticks = coal->rx_coalesce_usecs;
75 /* 2 completion records per rx packet */
76 bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
77 bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
78 bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
Michael Chanc0c050c2015-10-22 16:01:17 -040079
Michael Chandfc9c942016-02-26 04:00:03 -050080 bp->tx_coal_ticks = coal->tx_coalesce_usecs;
81 bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
82 bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
83 bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
84
Michael Chan51f30782016-07-01 18:46:29 -040085 if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
86 u32 stats_ticks = coal->stats_block_coalesce_usecs;
87
88 stats_ticks = clamp_t(u32, stats_ticks,
89 BNXT_MIN_STATS_COAL_TICKS,
90 BNXT_MAX_STATS_COAL_TICKS);
91 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
92 bp->stats_coal_ticks = stats_ticks;
93 update_stats = true;
94 }
95
96 if (netif_running(dev)) {
97 if (update_stats) {
98 rc = bnxt_close_nic(bp, true, false);
99 if (!rc)
100 rc = bnxt_open_nic(bp, true, false);
101 } else {
102 rc = bnxt_hwrm_set_coal(bp);
103 }
104 }
Michael Chanc0c050c2015-10-22 16:01:17 -0400105
106 return rc;
107}
108
/* Number of software stats values emitted per completion ring; must match
 * the per-ring strings generated in bnxt_get_strings().
 */
#define BNXT_NUM_STATS 21

/* Build a { offset, counter name } pair for the port stats table below.
 * BNXT_RX_STATS_OFFSET/BNXT_TX_STATS_OFFSET are defined in bnxt.h --
 * presumably the tx variant folds in the tx block base; verify there.
 */
#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

/* Per-port hardware counters, reported only when BNXT_FLAG_PORT_STATS is
 * set.  'offset' is in 64-bit words into the port stats DMA block (it is
 * added to a __le64 pointer in bnxt_get_ethtool_stats()).
 */
static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
};

#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
199
Michael Chanc0c050c2015-10-22 16:01:17 -0400200static int bnxt_get_sset_count(struct net_device *dev, int sset)
201{
202 struct bnxt *bp = netdev_priv(dev);
203
204 switch (sset) {
Michael Chan8ddc9aa2016-03-07 15:38:47 -0500205 case ETH_SS_STATS: {
206 int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
207
208 if (bp->flags & BNXT_FLAG_PORT_STATS)
209 num_stats += BNXT_NUM_PORT_STATS;
210
211 return num_stats;
212 }
Michael Chanc0c050c2015-10-22 16:01:17 -0400213 default:
214 return -EOPNOTSUPP;
215 }
216}
217
/* ethtool .get_ethtool_stats handler: copy the per-ring hardware counters
 * (plus the software rx_l4_csum_errors count) and, if supported, the
 * per-port counters into @buf.  The layout must match bnxt_get_strings().
 */
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	/* NOTE(review): buf_size covers only the per-ring portion of buf;
	 * the memset below does not clear the port-stats tail.  Harmless
	 * today because every port-stats slot is overwritten when present.
	 */
	u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;

	memset(buf, 0, buf_size);

	/* Rings not allocated yet (device never opened) */
	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
		int k;

		/* Hardware counters are little-endian 64-bit values */
		for (k = 0; k < stat_fields; j++, k++)
			buf[j] = le64_to_cpu(hw_stats[k]);
		buf[j++] = cpr->rx_l4_csum_errors;
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;

		/* Offsets in bnxt_port_stats_arr are in 64-bit words */
		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
			buf[j] = le64_to_cpu(*(port_stats +
					       bnxt_port_stats_arr[i].offset));
		}
	}
}
250
251static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
252{
253 struct bnxt *bp = netdev_priv(dev);
254 u32 i;
255
256 switch (stringset) {
257 /* The number of strings must match BNXT_NUM_STATS defined above. */
258 case ETH_SS_STATS:
259 for (i = 0; i < bp->cp_nr_rings; i++) {
260 sprintf(buf, "[%d]: rx_ucast_packets", i);
261 buf += ETH_GSTRING_LEN;
262 sprintf(buf, "[%d]: rx_mcast_packets", i);
263 buf += ETH_GSTRING_LEN;
264 sprintf(buf, "[%d]: rx_bcast_packets", i);
265 buf += ETH_GSTRING_LEN;
266 sprintf(buf, "[%d]: rx_discards", i);
267 buf += ETH_GSTRING_LEN;
268 sprintf(buf, "[%d]: rx_drops", i);
269 buf += ETH_GSTRING_LEN;
270 sprintf(buf, "[%d]: rx_ucast_bytes", i);
271 buf += ETH_GSTRING_LEN;
272 sprintf(buf, "[%d]: rx_mcast_bytes", i);
273 buf += ETH_GSTRING_LEN;
274 sprintf(buf, "[%d]: rx_bcast_bytes", i);
275 buf += ETH_GSTRING_LEN;
276 sprintf(buf, "[%d]: tx_ucast_packets", i);
277 buf += ETH_GSTRING_LEN;
278 sprintf(buf, "[%d]: tx_mcast_packets", i);
279 buf += ETH_GSTRING_LEN;
280 sprintf(buf, "[%d]: tx_bcast_packets", i);
281 buf += ETH_GSTRING_LEN;
282 sprintf(buf, "[%d]: tx_discards", i);
283 buf += ETH_GSTRING_LEN;
284 sprintf(buf, "[%d]: tx_drops", i);
285 buf += ETH_GSTRING_LEN;
286 sprintf(buf, "[%d]: tx_ucast_bytes", i);
287 buf += ETH_GSTRING_LEN;
288 sprintf(buf, "[%d]: tx_mcast_bytes", i);
289 buf += ETH_GSTRING_LEN;
290 sprintf(buf, "[%d]: tx_bcast_bytes", i);
291 buf += ETH_GSTRING_LEN;
292 sprintf(buf, "[%d]: tpa_packets", i);
293 buf += ETH_GSTRING_LEN;
294 sprintf(buf, "[%d]: tpa_bytes", i);
295 buf += ETH_GSTRING_LEN;
296 sprintf(buf, "[%d]: tpa_events", i);
297 buf += ETH_GSTRING_LEN;
298 sprintf(buf, "[%d]: tpa_aborts", i);
299 buf += ETH_GSTRING_LEN;
300 sprintf(buf, "[%d]: rx_l4_csum_errors", i);
301 buf += ETH_GSTRING_LEN;
302 }
Michael Chan8ddc9aa2016-03-07 15:38:47 -0500303 if (bp->flags & BNXT_FLAG_PORT_STATS) {
304 for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
305 strcpy(buf, bnxt_port_stats_arr[i].string);
306 buf += ETH_GSTRING_LEN;
307 }
308 }
Michael Chanc0c050c2015-10-22 16:01:17 -0400309 break;
310 default:
311 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
312 stringset);
313 break;
314 }
315}
316
317static void bnxt_get_ringparam(struct net_device *dev,
318 struct ethtool_ringparam *ering)
319{
320 struct bnxt *bp = netdev_priv(dev);
321
322 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
323 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
324 ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
325
326 ering->rx_pending = bp->rx_ring_size;
327 ering->rx_jumbo_pending = bp->rx_agg_ring_size;
328 ering->tx_pending = bp->tx_ring_size;
329}
330
331static int bnxt_set_ringparam(struct net_device *dev,
332 struct ethtool_ringparam *ering)
333{
334 struct bnxt *bp = netdev_priv(dev);
335
336 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
337 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
338 (ering->tx_pending <= MAX_SKB_FRAGS))
339 return -EINVAL;
340
341 if (netif_running(dev))
342 bnxt_close_nic(bp, false, false);
343
344 bp->rx_ring_size = ering->rx_pending;
345 bp->tx_ring_size = ering->tx_pending;
346 bnxt_set_ring_params(bp);
347
348 if (netif_running(dev))
349 return bnxt_open_nic(bp, false, false);
350
351 return 0;
352}
353
/* ethtool .get_channels handler: report maximum and current ring counts.
 * Combined (shared-ring) mode and separate rx/tx mode are mutually
 * exclusive; Nitro A0 chips reserve one combined ring and do not report
 * separate rx/tx counts.
 */
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int max_rx_rings, max_tx_rings, tcs;

	/* Maximums with ring sharing allowed */
	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	/* Maximums without sharing; zero both on failure */
	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1)
		max_tx_rings /= tcs;	/* tx rings are divided among TCs */

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}
386
/* ethtool .set_channels handler: reconfigure the ring counts.  Validates
 * the request, reserves rings with the firmware, then closes and reopens
 * the NIC if it is running.  XDP requires combined (shared) mode.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	/* Caller must set either combined or both rx and tx counts... */
	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	/* ...but not a mix of the two */
	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	/* Nitro A0 only supports combined mode */
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		/* One XDP tx ring per rx ring */
		tx_xdp = req_rx_rings;
	}
	/* Ask firmware to reserve the rings before committing anything */
	rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	/* Shared mode: completion rings cover both tx and rx */
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	bp->num_stat_ctxs = bp->cp_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * to renable
			 */
		}
	}

	return rc;
}
477
478#ifdef CONFIG_RFS_ACCEL
/* ETHTOOL_GRXCLSRLALL: return the sw_ids of all installed ntuple filters.
 * Walks the filter hash table under RCU, stopping once cmd->rule_cnt
 * entries have been filled; cmd->rule_cnt is updated to the number
 * actually written.
 */
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}
503
/* ETHTOOL_GRXCLSRULE: look up the ntuple filter whose sw_id matches
 * fs->location and fill in the flow spec from its flow keys.
 *
 * Locking: rcu_read_lock() is taken per hash bucket; when a filter is
 * found we jump to fltr_found still holding the lock, and the matching
 * rcu_read_unlock() happens at fltr_err below.
 *
 * Returns 0 on success, -EINVAL if no filter matches or the stored
 * protocol is unsupported.
 */
static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;	/* lock still held */
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		/* IPv4: report full 4-tuple with all-ones masks */
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		int i;

		/* IPv6: report full 4-tuple with all-ones masks */
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();	/* matches the lock held since fltr_found */

	return rc;
}
Michael Chana0119522016-11-16 21:13:10 -0500582#endif
583
584static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
585{
586 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
587 return RXH_IP_SRC | RXH_IP_DST;
588 return 0;
589}
590
591static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
592{
593 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
594 return RXH_IP_SRC | RXH_IP_DST;
595 return 0;
596}
597
/* ETHTOOL_GRXFH: report which header fields feed the RSS hash for the
 * requested flow type.  TCP/UDP cases intentionally fall through to pick
 * up the 2-tuple IP bits as well.
 */
static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}
642
643#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
644#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
645
/* ETHTOOL_SRXFH: set which header fields feed the RSS hash for a flow
 * type.  Only 4-tuple, 2-tuple, or none are accepted.  4-tuple is
 * configurable per TCP/UDP flow; 2-tuple applies to the whole IP family.
 * If the resulting config differs, the NIC is bounced to apply it.
 */
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		/* 4-tuple UDP hashing needs hardware support */
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		/* 4-tuple only valid for the flow types above */
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	/* No change: skip the restart */
	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
Michael Chanc0c050c2015-10-22 16:01:17 -0400722
/* ethtool .get_rxnfc handler: dispatch RX flow classification queries.
 * The ntuple-filter queries are only compiled in with CONFIG_RFS_ACCEL.
 */
static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
#ifdef CONFIG_RFS_ACCEL
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;
#endif

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}
Michael Chana0119522016-11-16 21:13:10 -0500760
761static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
762{
763 struct bnxt *bp = netdev_priv(dev);
764 int rc;
765
766 switch (cmd->cmd) {
767 case ETHTOOL_SRXFH:
768 rc = bnxt_srxfh(bp, cmd);
769 break;
770
771 default:
772 rc = -EOPNOTSUPP;
773 break;
774 }
775 return rc;
776}
Michael Chanc0c050c2015-10-22 16:01:17 -0400777
778static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
779{
780 return HW_HASH_INDEX_SIZE;
781}
782
783static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
784{
785 return HW_HASH_KEY_SIZE;
786}
787
788static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
789 u8 *hfunc)
790{
791 struct bnxt *bp = netdev_priv(dev);
792 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
793 int i = 0;
794
795 if (hfunc)
796 *hfunc = ETH_RSS_HASH_TOP;
797
798 if (indir)
799 for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
800 indir[i] = le16_to_cpu(vnic->rss_table[i]);
801
802 if (key)
803 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
804
805 return 0;
806}
807
808static void bnxt_get_drvinfo(struct net_device *dev,
809 struct ethtool_drvinfo *info)
810{
811 struct bnxt *bp = netdev_priv(dev);
Rob Swindell3ebf6f02016-02-26 04:00:06 -0500812 char *pkglog;
813 char *pkgver = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -0400814
Rob Swindell3ebf6f02016-02-26 04:00:06 -0500815 pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
816 if (pkglog)
817 pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
Michael Chanc0c050c2015-10-22 16:01:17 -0400818 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
819 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
Rob Swindell3ebf6f02016-02-26 04:00:06 -0500820 if (pkgver && *pkgver != 0 && isdigit(*pkgver))
821 snprintf(info->fw_version, sizeof(info->fw_version) - 1,
822 "%s pkg %s", bp->fw_ver_str, pkgver);
823 else
824 strlcpy(info->fw_version, bp->fw_ver_str,
825 sizeof(info->fw_version));
Michael Chanc0c050c2015-10-22 16:01:17 -0400826 strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
827 info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
828 info->testinfo_len = BNXT_NUM_TESTS(bp);
829 /* TODO CHIMP_FW: eeprom dump details */
830 info->eedump_len = 0;
831 /* TODO CHIMP FW: reg dump details */
832 info->regdump_len = 0;
Rob Swindell3ebf6f02016-02-26 04:00:06 -0500833 kfree(pkglog);
Michael Chanc0c050c2015-10-22 16:01:17 -0400834}
835
Michael Chan8e202362017-04-04 18:14:09 -0400836static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
837{
838 struct bnxt *bp = netdev_priv(dev);
839
840 wol->supported = 0;
841 wol->wolopts = 0;
842 memset(&wol->sopass, 0, sizeof(wol->sopass));
843 if (bp->flags & BNXT_FLAG_WOL_CAP) {
844 wol->supported = WAKE_MAGIC;
845 if (bp->wol)
846 wol->wolopts = WAKE_MAGIC;
847 }
848}
849
Michael Chan5282db62017-04-04 18:14:10 -0400850static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
851{
852 struct bnxt *bp = netdev_priv(dev);
853
854 if (wol->wolopts & ~WAKE_MAGIC)
855 return -EINVAL;
856
857 if (wol->wolopts & WAKE_MAGIC) {
858 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
859 return -EINVAL;
860 if (!bp->wol) {
861 if (bnxt_hwrm_alloc_wol_fltr(bp))
862 return -EBUSY;
863 bp->wol = 1;
864 }
865 } else {
866 if (bp->wol) {
867 if (bnxt_hwrm_free_wol_fltr(bp))
868 return -EBUSY;
869 bp->wol = 0;
870 }
871 }
872 return 0;
873}
874
Michael Chan170ce012016-04-05 14:08:57 -0400875u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
Michael Chanc0c050c2015-10-22 16:01:17 -0400876{
Michael Chanc0c050c2015-10-22 16:01:17 -0400877 u32 speed_mask = 0;
878
879 /* TODO: support 25GB, 40GB, 50GB with different cable type */
880 /* set the advertised speeds */
881 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
882 speed_mask |= ADVERTISED_100baseT_Full;
883 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
884 speed_mask |= ADVERTISED_1000baseT_Full;
885 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
886 speed_mask |= ADVERTISED_2500baseX_Full;
887 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
888 speed_mask |= ADVERTISED_10000baseT_Full;
Michael Chanc0c050c2015-10-22 16:01:17 -0400889 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
Michael Chan1c49c422016-02-10 17:33:48 -0500890 speed_mask |= ADVERTISED_40000baseCR4_Full;
Michael Chan27c4d572016-03-07 15:38:41 -0500891
892 if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
893 speed_mask |= ADVERTISED_Pause;
894 else if (fw_pause & BNXT_LINK_PAUSE_TX)
895 speed_mask |= ADVERTISED_Asym_Pause;
896 else if (fw_pause & BNXT_LINK_PAUSE_RX)
897 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
898
Michael Chanc0c050c2015-10-22 16:01:17 -0400899 return speed_mask;
900}
901
/* Translate a firmware speed mask plus pause flags into an ethtool
 * link_ksettings link-mode bitmap ('name' selects e.g. supported or
 * advertising).  Pause mapping mirrors _bnxt_fw_to_ethtool_adv_spds().
 */
#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}
933
/* Inverse of BNXT_FW_TO_ETHTOOL_SPDS: collect the speed link-mode bits
 * set in the lk_ksettings mask named by "name" into a firmware speed
 * bitmap (BNXT_LINK_SPEED_MSK_*).  Half-duplex 100M/1G modes are folded
 * into the same mask bit as their full-duplex counterparts.
 */
#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
}
959
960static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
961 struct ethtool_link_ksettings *lk_ksettings)
Michael Chan27c4d572016-03-07 15:38:41 -0500962{
Michael Chan68515a12016-12-29 12:13:34 -0500963 u16 fw_speeds = link_info->advertising;
Michael Chan27c4d572016-03-07 15:38:41 -0500964 u8 fw_pause = 0;
965
966 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
967 fw_pause = link_info->auto_pause_setting;
968
Michael Chan00c04a92016-06-13 02:25:38 -0400969 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
Michael Chan27c4d572016-03-07 15:38:41 -0500970}
971
Michael Chan00c04a92016-06-13 02:25:38 -0400972static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
973 struct ethtool_link_ksettings *lk_ksettings)
Michael Chan32773602016-03-07 15:38:42 -0500974{
975 u16 fw_speeds = link_info->lp_auto_link_speeds;
976 u8 fw_pause = 0;
977
978 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
979 fw_pause = link_info->lp_pause;
980
Michael Chan00c04a92016-06-13 02:25:38 -0400981 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
982 lp_advertising);
Michael Chan32773602016-03-07 15:38:42 -0500983}
984
Michael Chan00c04a92016-06-13 02:25:38 -0400985static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
986 struct ethtool_link_ksettings *lk_ksettings)
Michael Chan4b32cac2016-03-07 15:38:43 -0500987{
988 u16 fw_speeds = link_info->support_speeds;
Michael Chan4b32cac2016-03-07 15:38:43 -0500989
Michael Chan00c04a92016-06-13 02:25:38 -0400990 BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
Michael Chan4b32cac2016-03-07 15:38:43 -0500991
Michael Chan00c04a92016-06-13 02:25:38 -0400992 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
993 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
994 Asym_Pause);
Michael Chan93ed8112016-06-13 02:25:37 -0400995
Michael Chan00c04a92016-06-13 02:25:38 -0400996 if (link_info->support_auto_speeds)
997 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
998 Autoneg);
Michael Chan93ed8112016-06-13 02:25:37 -0400999}
1000
Michael Chanc0c050c2015-10-22 16:01:17 -04001001u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1002{
1003 switch (fw_link_speed) {
1004 case BNXT_LINK_SPEED_100MB:
1005 return SPEED_100;
1006 case BNXT_LINK_SPEED_1GB:
1007 return SPEED_1000;
1008 case BNXT_LINK_SPEED_2_5GB:
1009 return SPEED_2500;
1010 case BNXT_LINK_SPEED_10GB:
1011 return SPEED_10000;
1012 case BNXT_LINK_SPEED_20GB:
1013 return SPEED_20000;
1014 case BNXT_LINK_SPEED_25GB:
1015 return SPEED_25000;
1016 case BNXT_LINK_SPEED_40GB:
1017 return SPEED_40000;
1018 case BNXT_LINK_SPEED_50GB:
1019 return SPEED_50000;
1020 default:
1021 return SPEED_UNKNOWN;
1022 }
1023}
1024
/* ethtool_ops::get_link_ksettings callback.  Reports supported and
 * advertised link modes, autoneg state, speed/duplex and port type
 * from the driver's cached bnxt_link_info.  Always returns 0.
 */
static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		/* Link-partner modes are only meaningful when link is up. */
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
		if (!netif_carrier_ok(dev))
			base->duplex = DUPLEX_UNKNOWN;
		else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
		else
			base->duplex = DUPLEX_HALF;
	} else {
		/* Forced mode: report the requested speed/duplex. */
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	/* Map the firmware media type to an ethtool port type. */
	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;

	return 0;
}
1084
1085static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
1086{
Michael Chan9d9cee02016-04-05 14:09:02 -04001087 struct bnxt *bp = netdev_priv(dev);
1088 struct bnxt_link_info *link_info = &bp->link_info;
1089 u16 support_spds = link_info->support_speeds;
1090 u32 fw_speed = 0;
1091
Michael Chanc0c050c2015-10-22 16:01:17 -04001092 switch (ethtool_speed) {
1093 case SPEED_100:
Michael Chan9d9cee02016-04-05 14:09:02 -04001094 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
1095 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
1096 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001097 case SPEED_1000:
Michael Chan9d9cee02016-04-05 14:09:02 -04001098 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
1099 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
1100 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001101 case SPEED_2500:
Michael Chan9d9cee02016-04-05 14:09:02 -04001102 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
1103 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
1104 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001105 case SPEED_10000:
Michael Chan9d9cee02016-04-05 14:09:02 -04001106 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
1107 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
1108 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001109 case SPEED_20000:
Michael Chan9d9cee02016-04-05 14:09:02 -04001110 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
1111 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
1112 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001113 case SPEED_25000:
Michael Chan9d9cee02016-04-05 14:09:02 -04001114 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
1115 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
1116 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001117 case SPEED_40000:
Michael Chan9d9cee02016-04-05 14:09:02 -04001118 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
1119 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
1120 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001121 case SPEED_50000:
Michael Chan9d9cee02016-04-05 14:09:02 -04001122 if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
1123 fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
1124 break;
Michael Chanc0c050c2015-10-22 16:01:17 -04001125 default:
1126 netdev_err(dev, "unsupported speed!\n");
1127 break;
1128 }
Michael Chan9d9cee02016-04-05 14:09:02 -04001129 return fw_speed;
Michael Chanc0c050c2015-10-22 16:01:17 -04001130}
1131
Michael Chan939f7f02016-04-05 14:08:58 -04001132u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
Michael Chanc0c050c2015-10-22 16:01:17 -04001133{
1134 u16 fw_speed_mask = 0;
1135
1136 /* only support autoneg at speed 100, 1000, and 10000 */
1137 if (advertising & (ADVERTISED_100baseT_Full |
1138 ADVERTISED_100baseT_Half)) {
1139 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1140 }
1141 if (advertising & (ADVERTISED_1000baseT_Full |
1142 ADVERTISED_1000baseT_Half)) {
1143 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1144 }
1145 if (advertising & ADVERTISED_10000baseT_Full)
1146 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1147
Michael Chan1c49c422016-02-10 17:33:48 -05001148 if (advertising & ADVERTISED_40000baseCR4_Full)
1149 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1150
Michael Chanc0c050c2015-10-22 16:01:17 -04001151 return fw_speed_mask;
1152}
1153
/* ethtool_ops::set_link_ksettings callback.  Updates the driver's
 * requested link configuration (autoneg advertisement or a forced
 * full-duplex speed) and pushes it to firmware if the device is up.
 * Only supported on a single-function PF.
 */
static int bnxt_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u16 fw_advertising = 0;
	u32 speed;
	int rc = 0;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (base->autoneg == AUTONEG_ENABLE) {
		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
					advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		/* No specific modes requested: advertise everything the
		 * PHY can autoneg.
		 */
		if (!fw_advertising)
			link_info->advertising = link_info->support_auto_speeds;
		else
			link_info->advertising = fw_advertising;
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u16 fw_speed;
		u8 phy_type = link_info->phy_type;

		/* Forced speed is not allowed on BASE-T media. */
		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		/* 0 means the speed is unknown or unsupported by the PHY. */
		fw_speed = bnxt_get_fw_speed(dev, speed);
		if (!fw_speed) {
			rc = -EINVAL;
			goto set_setting_exit;
		}
		link_info->req_link_speed = fw_speed;
		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
		link_info->autoneg = 0;
		link_info->advertising = 0;
	}

	/* Only touch hardware when the interface is up. */
	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	return rc;
}
1214
1215static void bnxt_get_pauseparam(struct net_device *dev,
1216 struct ethtool_pauseparam *epause)
1217{
1218 struct bnxt *bp = netdev_priv(dev);
1219 struct bnxt_link_info *link_info = &bp->link_info;
1220
1221 if (BNXT_VF(bp))
1222 return;
Michael Chanb7634992016-02-10 17:33:46 -05001223 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
Michael Chan3c02d1b2016-03-28 19:46:07 -04001224 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
1225 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
Michael Chanc0c050c2015-10-22 16:01:17 -04001226}
1227
/* ethtool_ops::set_pauseparam callback.  Records the requested pause
 * configuration in link_info and applies it to firmware if the device
 * is up.  Only supported on a single-function PF.
 */
static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (epause->autoneg) {
		/* Pause autoneg is only meaningful with speed autoneg on. */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
			return -EINVAL;

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		/* Newer firmware takes an explicit AUTONEG_PAUSE value;
		 * older firmware keeps the accumulated RX/TX bits below.
		 */
		if (bp->hwrm_spec_code >= 0x10201)
			link_info->req_flow_ctrl =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	/* Fold the requested RX/TX pause bits into req_flow_ctrl. */
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);
	return rc;
}
1265
1266static u32 bnxt_get_link(struct net_device *dev)
1267{
1268 struct bnxt *bp = netdev_priv(dev);
1269
1270 /* TODO: handle MF, VF, driver close case */
1271 return bp->link_info.link_up;
1272}
1273
Rob Swindell5ac67d82016-09-19 03:58:03 -04001274static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1275 u16 ext, u16 *index, u32 *item_length,
1276 u32 *data_length);
1277
Michael Chanc0c050c2015-10-22 16:01:17 -04001278static int bnxt_flash_nvram(struct net_device *dev,
1279 u16 dir_type,
1280 u16 dir_ordinal,
1281 u16 dir_ext,
1282 u16 dir_attr,
1283 const u8 *data,
1284 size_t data_len)
1285{
1286 struct bnxt *bp = netdev_priv(dev);
1287 int rc;
1288 struct hwrm_nvm_write_input req = {0};
1289 dma_addr_t dma_handle;
1290 u8 *kmem;
1291
1292 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
1293
1294 req.dir_type = cpu_to_le16(dir_type);
1295 req.dir_ordinal = cpu_to_le16(dir_ordinal);
1296 req.dir_ext = cpu_to_le16(dir_ext);
1297 req.dir_attr = cpu_to_le16(dir_attr);
1298 req.dir_data_length = cpu_to_le32(data_len);
1299
1300 kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
1301 GFP_KERNEL);
1302 if (!kmem) {
1303 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1304 (unsigned)data_len);
1305 return -ENOMEM;
1306 }
1307 memcpy(kmem, data, data_len);
1308 req.host_src_addr = cpu_to_le64(dma_handle);
1309
1310 rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
1311 dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
1312
1313 return rc;
1314}
1315
/* Ask firmware to reset the embedded processor that runs the image
 * identified by @dir_type so a newly flashed image takes effect.
 * ChiMP/boot and APE targets are marked for self-reset on the next
 * PCIe reset; KONG and BONO set no self-reset status here.  Returns
 * -EINVAL for directory types with no resettable processor, otherwise
 * the HWRM command status.
 */
static int bnxt_firmware_reset(struct net_device *dev,
			       u16 dir_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
1356
/* Validate an APE-binary firmware image (bnxt_fw_header + payload +
 * trailing CRC32) against @dir_type, write it to NVRAM, and request a
 * self-reset of the target processor so it takes effect.  Returns 0 on
 * success, -EINVAL on any validation failure, or the HWRM status from
 * the flash/reset steps.
 */
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	/* Map the directory type to the code type the header must carry. */
	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}
1436
/* Validate a microcode image carrying a bnxt_ucode_trailer and trailing
 * CRC32, then write it to NVRAM.  Unlike bnxt_flash_firmware(), no
 * processor reset is requested afterwards.  Returns 0 on success,
 * -EINVAL on validation failure, or the HWRM status from the write.
 */
static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	/* The trailer sits at the very end of the image. */
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	/* The trailer must agree with the requested directory type. */
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);

	return rc;
}
1487
Michael Chanc0c050c2015-10-22 16:01:17 -04001488static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
1489{
1490 switch (dir_type) {
1491 case BNX_DIR_TYPE_CHIMP_PATCH:
1492 case BNX_DIR_TYPE_BOOTCODE:
1493 case BNX_DIR_TYPE_BOOTCODE_2:
1494 case BNX_DIR_TYPE_APE_FW:
1495 case BNX_DIR_TYPE_APE_PATCH:
1496 case BNX_DIR_TYPE_KONG_FW:
1497 case BNX_DIR_TYPE_KONG_PATCH:
Rob Swindell93e0b4f2016-07-01 18:46:24 -04001498 case BNX_DIR_TYPE_BONO_FW:
1499 case BNX_DIR_TYPE_BONO_PATCH:
Michael Chanc0c050c2015-10-22 16:01:17 -04001500 return true;
1501 }
1502
1503 return false;
1504}
1505
Rob Swindell5ac67d82016-09-19 03:58:03 -04001506static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
Michael Chanc0c050c2015-10-22 16:01:17 -04001507{
1508 switch (dir_type) {
1509 case BNX_DIR_TYPE_AVS:
1510 case BNX_DIR_TYPE_EXP_ROM_MBA:
1511 case BNX_DIR_TYPE_PCIE:
1512 case BNX_DIR_TYPE_TSCF_UCODE:
1513 case BNX_DIR_TYPE_EXT_PHY:
1514 case BNX_DIR_TYPE_CCM:
1515 case BNX_DIR_TYPE_ISCSI_BOOT:
1516 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
1517 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
1518 return true;
1519 }
1520
1521 return false;
1522}
1523
1524static bool bnxt_dir_type_is_executable(u16 dir_type)
1525{
1526 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
Rob Swindell5ac67d82016-09-19 03:58:03 -04001527 bnxt_dir_type_is_other_exec_format(dir_type);
Michael Chanc0c050c2015-10-22 16:01:17 -04001528}
1529
1530static int bnxt_flash_firmware_from_file(struct net_device *dev,
1531 u16 dir_type,
1532 const char *filename)
1533{
1534 const struct firmware *fw;
1535 int rc;
1536
Michael Chanc0c050c2015-10-22 16:01:17 -04001537 rc = request_firmware(&fw, filename, &dev->dev);
1538 if (rc != 0) {
1539 netdev_err(dev, "Error %d requesting firmware file: %s\n",
1540 rc, filename);
1541 return rc;
1542 }
1543 if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
1544 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
Rob Swindell5ac67d82016-09-19 03:58:03 -04001545 else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
1546 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
Michael Chanc0c050c2015-10-22 16:01:17 -04001547 else
1548 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
1549 0, 0, fw->data, fw->size);
1550 release_firmware(fw);
1551 return rc;
1552}
1553
/* Flash a whole firmware package file: stage the file into the NVRAM
 * update area (BNX_DIR_TYPE_UPDATE) with NVM_MODIFY, then ask firmware
 * to install it with NVM_INSTALL_UPDATE, retrying once with
 * defragmentation allowed if the first attempt fails on fragmentation.
 * Returns 0 on success or a negative errno.
 */
static int bnxt_flash_package_from_file(struct net_device *dev,
					char *filename, u32 install_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_nvm_install_update_input install = {0};
	const struct firmware *fw;
	u32 item_len;
	u16 index;
	int rc;

	bnxt_hwrm_fw_set_time(bp);

	/* Locate the pre-created package update area in NVRAM. */
	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, &item_len, NULL) != 0) {
		netdev_err(dev, "PKG update area not created in nvram\n");
		return -ENOBUFS;
	}

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	if (fw->size > item_len) {
		netdev_err(dev, "PKG insufficient update area in nvram: %lu",
			   (unsigned long)fw->size);
		rc = -EFBIG;
	} else {
		dma_addr_t dma_handle;
		u8 *kmem;
		struct hwrm_nvm_modify_input modify = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);

		modify.dir_idx = cpu_to_le16(index);
		modify.len = cpu_to_le32(fw->size);

		/* Stage the package in a DMA buffer for firmware to read. */
		kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
					  &dma_handle, GFP_KERNEL);
		if (!kmem) {
			netdev_err(dev,
				   "dma_alloc_coherent failure, length = %u\n",
				   (unsigned int)fw->size);
			rc = -ENOMEM;
		} else {
			memcpy(kmem, fw->data, fw->size);
			modify.host_src_addr = cpu_to_le64(dma_handle);

			rc = hwrm_send_message(bp, &modify, sizeof(modify),
					       FLASH_PACKAGE_TIMEOUT);
			dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
					  dma_handle);
		}
	}
	release_firmware(fw);
	if (rc)
		return rc;

	/* The ethtool "region" may encode the install type in either
	 * halfword; normalize to the low 16 bits.
	 */
	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	install.install_type = cpu_to_le32(install_type);

	/* Hold hwrm_cmd_lock across both sends so resp stays valid. */
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &install, sizeof(install),
				INSTALL_PACKAGE_TIMEOUT);
	if (rc) {
		rc = -EOPNOTSUPP;
		goto flash_pkg_exit;
	}

	if (resp->error_code) {
		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;

		/* Retry once, allowing firmware to defragment the NVM. */
		if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
			install.flags |= cpu_to_le16(
			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
			rc = _hwrm_send_message(bp, &install, sizeof(install),
						INSTALL_PACKAGE_TIMEOUT);
			if (rc) {
				rc = -EOPNOTSUPP;
				goto flash_pkg_exit;
			}
		}
	}

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = -ENOPKG;
	}
flash_pkg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
1653
1654static int bnxt_flash_device(struct net_device *dev,
1655 struct ethtool_flash *flash)
1656{
1657 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
1658 netdev_err(dev, "flashdev not supported from a virtual function\n");
1659 return -EINVAL;
1660 }
1661
Rob Swindell5ac67d82016-09-19 03:58:03 -04001662 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
1663 flash->region > 0xffff)
1664 return bnxt_flash_package_from_file(dev, flash->data,
1665 flash->region);
Michael Chanc0c050c2015-10-22 16:01:17 -04001666
1667 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
1668}
1669
1670static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
1671{
1672 struct bnxt *bp = netdev_priv(dev);
1673 int rc;
1674 struct hwrm_nvm_get_dir_info_input req = {0};
1675 struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
1676
1677 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
1678
1679 mutex_lock(&bp->hwrm_cmd_lock);
1680 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1681 if (!rc) {
1682 *entries = le32_to_cpu(output->entries);
1683 *length = le32_to_cpu(output->entry_length);
1684 }
1685 mutex_unlock(&bp->hwrm_cmd_lock);
1686 return rc;
1687}
1688
/* ethtool_ops::get_eeprom_len callback.
 *
 * Returning -1 lets the ethtool utility pass the entire 32-bit range
 * of offsets through to the EEPROM accessors.
 */
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	return -1;
}
1696
/* Copy the NVRAM directory into @data for the ethtool eeprom interface.
 * Output layout: one byte entry count, one byte entry size, then the
 * raw directory entries, truncated to @len and padded with 0xff.
 * Note: count and entry size are intentionally truncated to 8 bits by
 * this 2-byte header format.
 */
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	/* Pre-fill so a short copy below leaves 0xff padding. */
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)buflen);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
	return rc;
}
1737
1738static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
1739 u32 length, u8 *data)
1740{
1741 struct bnxt *bp = netdev_priv(dev);
1742 int rc;
1743 u8 *buf;
1744 dma_addr_t dma_handle;
1745 struct hwrm_nvm_read_input req = {0};
1746
1747 buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
1748 GFP_KERNEL);
1749 if (!buf) {
1750 netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
1751 (unsigned)length);
1752 return -ENOMEM;
1753 }
1754 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
1755 req.host_dest_addr = cpu_to_le64(dma_handle);
1756 req.dir_idx = cpu_to_le16(index);
1757 req.offset = cpu_to_le32(offset);
1758 req.len = cpu_to_le32(length);
1759
1760 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1761 if (rc == 0)
1762 memcpy(data, buf, length);
1763 dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
1764 return rc;
1765}
1766
Rob Swindell3ebf6f02016-02-26 04:00:06 -05001767static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1768 u16 ext, u16 *index, u32 *item_length,
1769 u32 *data_length)
1770{
1771 struct bnxt *bp = netdev_priv(dev);
1772 int rc;
1773 struct hwrm_nvm_find_dir_entry_input req = {0};
1774 struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
1775
1776 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
1777 req.enables = 0;
1778 req.dir_idx = 0;
1779 req.dir_type = cpu_to_le16(type);
1780 req.dir_ordinal = cpu_to_le16(ordinal);
1781 req.dir_ext = cpu_to_le16(ext);
1782 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
Michael Chan90e209212016-02-26 04:00:08 -05001783 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
Rob Swindell3ebf6f02016-02-26 04:00:06 -05001784 if (rc == 0) {
1785 if (index)
1786 *index = le16_to_cpu(output->dir_idx);
1787 if (item_length)
1788 *item_length = le32_to_cpu(output->dir_item_length);
1789 if (data_length)
1790 *data_length = le32_to_cpu(output->dir_data_length);
1791 }
1792 return rc;
1793}
1794
/* Scan a PKG_LOG blob (tab-separated fields, newline-separated records)
 * and return a pointer to the value of column 'desired_field' in the LAST
 * record, or NULL if the data is empty or the field is absent.
 *
 * Mutates 'data' in place: tab and final-newline separators are replaced
 * with NULs so the returned pointer is a usable C string inside the
 * caller's buffer.
 * NOTE(review): 'data' is u8 * but is walked through char * cursors;
 * callers pass char buffers, so the implicit conversion looks benign here
 * — confirm no -Wpointer-sign fallout in the build.
 */
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char *retval = NULL;	/* value of the wanted field, per record */
	char *p;		/* scan cursor */
	char *value;		/* start of the current field */
	int field = 0;		/* 0-based field index within a record */

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		/* start of a new record: reset per-record state */
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			/* advance to the end of the current field */
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			/* no tab means end of record (or end of data) */
			if (*p != '\t')
				break;
			*p = 0;		/* terminate the field value */
			field++;
			p++;
		}
		if (*p == 0)
			break;		/* end of data reached mid-record */
		*p = 0;			/* terminate the record */
	}
	/* retval points into 'data' for the last record scanned */
	return retval;
}
1827
1828static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
1829{
1830 u16 index = 0;
1831 u32 datalen;
1832
1833 if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
1834 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
1835 &index, NULL, &datalen) != 0)
1836 return NULL;
1837
1838 memset(buf, 0, buflen);
1839 if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
1840 return NULL;
1841
1842 return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
1843 datalen);
1844}
1845
Michael Chanc0c050c2015-10-22 16:01:17 -04001846static int bnxt_get_eeprom(struct net_device *dev,
1847 struct ethtool_eeprom *eeprom,
1848 u8 *data)
1849{
1850 u32 index;
1851 u32 offset;
1852
1853 if (eeprom->offset == 0) /* special offset value to get directory */
1854 return bnxt_get_nvram_directory(dev, eeprom->len, data);
1855
1856 index = eeprom->offset >> 24;
1857 offset = eeprom->offset & 0xffffff;
1858
1859 if (index == 0) {
1860 netdev_err(dev, "unsupported index value: %d\n", index);
1861 return -EINVAL;
1862 }
1863
1864 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
1865}
1866
1867static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
1868{
1869 struct bnxt *bp = netdev_priv(dev);
1870 struct hwrm_nvm_erase_dir_entry_input req = {0};
1871
1872 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
1873 req.dir_idx = cpu_to_le16(index);
1874 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1875}
1876
1877static int bnxt_set_eeprom(struct net_device *dev,
1878 struct ethtool_eeprom *eeprom,
1879 u8 *data)
1880{
1881 struct bnxt *bp = netdev_priv(dev);
1882 u8 index, dir_op;
1883 u16 type, ext, ordinal, attr;
1884
1885 if (!BNXT_PF(bp)) {
1886 netdev_err(dev, "NVM write not supported from a virtual function\n");
1887 return -EINVAL;
1888 }
1889
1890 type = eeprom->magic >> 16;
1891
1892 if (type == 0xffff) { /* special value for directory operations */
1893 index = eeprom->magic & 0xff;
1894 dir_op = eeprom->magic >> 8;
1895 if (index == 0)
1896 return -EINVAL;
1897 switch (dir_op) {
1898 case 0x0e: /* erase */
1899 if (eeprom->offset != ~eeprom->magic)
1900 return -EINVAL;
1901 return bnxt_erase_nvram_directory(dev, index - 1);
1902 default:
1903 return -EINVAL;
1904 }
1905 }
1906
1907 /* Create or re-write an NVM item: */
1908 if (bnxt_dir_type_is_executable(type) == true)
Rob Swindell5ac67d82016-09-19 03:58:03 -04001909 return -EOPNOTSUPP;
Michael Chanc0c050c2015-10-22 16:01:17 -04001910 ext = eeprom->magic & 0xffff;
1911 ordinal = eeprom->offset >> 16;
1912 attr = eeprom->offset & 0xffff;
1913
1914 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
1915 eeprom->len);
1916}
1917
/* ethtool --set-eee handler.  Validates the requested Energy Efficient
 * Ethernet settings against the current autoneg state and the LPI timer
 * limits reported by firmware, stores them in bp->eee, and (if the
 * interface is running) pushes them to firmware.
 *
 * Returns 0 on success, -EOPNOTSUPP if EEE is unavailable on this
 * function/port, or -EINVAL for invalid settings.
 */
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising =
		_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	int rc = 0;

	/* EEE can only be configured when a single PF owns the port */
	if (!BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	/* Disabling EEE: skip validation, just record the new state */
	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled) {
		/* Range-check the LPI timer only if FW reported limits;
		 * otherwise preserve the previously stored timer value.
		 */
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			return -EINVAL;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		/* Default: advertise all supported EEE autoneg speeds */
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		/* EEE advertisement must not exceed the autoneg speeds */
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		return -EINVAL;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	/* Apply immediately only if the link is up and running */
	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

	return rc;
}
1969
1970static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1971{
1972 struct bnxt *bp = netdev_priv(dev);
1973
1974 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
1975 return -EOPNOTSUPP;
1976
1977 *edata = bp->eee;
1978 if (!bp->eee.eee_enabled) {
1979 /* Preserve tx_lpi_timer so that the last value will be used
1980 * by default when it is re-enabled.
1981 */
1982 edata->advertised = 0;
1983 edata->tx_lpi_enabled = 0;
1984 }
1985
1986 if (!bp->eee.eee_active)
1987 edata->lp_advertised = 0;
1988
1989 return 0;
1990}
1991
/* Read 'data_length' bytes from a pluggable module's EEPROM through the
 * firmware I2C proxy, splitting the transfer into chunks of at most
 * BNXT_MAX_PHY_I2C_RESP_SIZE bytes.
 *
 * @i2c_addr:    I2C slave address (e.g. the module's A0h or A2h device).
 * @page_number: EEPROM page to select.
 * @start_addr:  starting byte offset within the page.
 * @buf:         destination buffer, at least 'data_length' bytes.
 *
 * Returns 0 on success, or the HWRM error of the first failed chunk
 * (the loop stops on the first error).
 */
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u16 start_addr,
					    u16 data_length, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc, byte_offset = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = i2c_addr;
	req.page_number = cpu_to_le16(page_number);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		/* Clamp each transfer to the firmware response size limit */
		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req.page_offset = cpu_to_le16(start_addr + byte_offset);
		req.data_length = xfer_size;
		/* PAGE_OFFSET enable bit only set for non-zero offsets */
		req.enables = cpu_to_le32(start_addr + byte_offset ?
				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
		/* Lock held per chunk so the shared response buffer is
		 * stable while its data is copied out.
		 */
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		mutex_unlock(&bp->hwrm_cmd_lock);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);

	return rc;
}
2024
/* ethtool -m handler: identify the plugged module's type and EEPROM
 * layout by reading the SFF-8472 compliance byte from I2C address A0h
 * via the firmware I2C proxy.
 *
 * Returns 0 with modinfo filled in, -EOPNOTSUPP if no module is usable
 * (absent/powered-down/10GBase-T, old firmware, or unknown module id),
 * or an HWRM error.
 */
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = I2C_DEV_ADDR_A0;
	req.page_number = 0;
	req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
	req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
	req.port_id = cpu_to_le16(bp->pf.port_id);
	/* Hold the lock while parsing the shared response buffer */
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u32 module_id = le32_to_cpu(output->data[0]);

		/* Map SFF module identifier to the ethtool EEPROM layout */
		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
2078
2079static int bnxt_get_module_eeprom(struct net_device *dev,
2080 struct ethtool_eeprom *eeprom,
2081 u8 *data)
2082{
2083 struct bnxt *bp = netdev_priv(dev);
2084 u16 start = eeprom->offset, length = eeprom->len;
Colin Ian Kingf3ea3112016-07-08 16:42:48 +01002085 int rc = 0;
Ajit Khaparde42ee18f2016-05-15 03:04:44 -04002086
2087 memset(data, 0, eeprom->len);
2088
2089 /* Read A0 portion of the EEPROM */
2090 if (start < ETH_MODULE_SFF_8436_LEN) {
2091 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
2092 length = ETH_MODULE_SFF_8436_LEN - start;
2093 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
2094 start, length, data);
2095 if (rc)
2096 return rc;
2097 start += length;
2098 data += length;
2099 length = eeprom->len - length;
2100 }
2101
2102 /* Read A2 portion of the EEPROM */
2103 if (length) {
2104 start -= ETH_MODULE_SFF_8436_LEN;
2105 bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
2106 length, data);
2107 }
2108 return rc;
2109}
2110
Deepak Khungarae8e98a2016-09-19 03:58:08 -04002111static int bnxt_nway_reset(struct net_device *dev)
2112{
2113 int rc = 0;
2114
2115 struct bnxt *bp = netdev_priv(dev);
2116 struct bnxt_link_info *link_info = &bp->link_info;
2117
2118 if (!BNXT_SINGLE_PF(bp))
2119 return -EOPNOTSUPP;
2120
2121 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
2122 return -EINVAL;
2123
2124 if (netif_running(dev))
2125 rc = bnxt_hwrm_set_link_setting(bp, true, false);
2126
2127 return rc;
2128}
2129
/* ethtool -p handler: blink the port LEDs to identify the device.
 * ETHTOOL_ID_ACTIVE starts alternate blinking (500 ms on/off) on all
 * LEDs the firmware reported; ETHTOOL_ID_INACTIVE restores defaults.
 *
 * Returns 0 on success, -EOPNOTSUPP when no LEDs are available or on a
 * VF, -EINVAL for unsupported states, or -EIO on firmware failure.
 */
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int i, rc;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		/* NOTE(review): constant comes from the LED1 namespace while
		 * the active branch uses LED0_* — presumably the *_STATE_*
		 * values are identical across LEDs; confirm in bnxt_hsi.h.
		 */
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.num_leds = bp->num_leds;
	/* The per-LED config fields form an array starting at led0_id */
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;	/* normalize HWRM status to an errno */
	return rc;
}
2170
/* ethtool operations supported by bnxt devices.  All handlers are
 * defined in this file; NVM-related ops (flash_device, get/set_eeprom)
 * are PF-only and enforce that themselves.
 */
const struct ethtool_ops bnxt_ethtool_ops = {
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
};