/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
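/* NVM writes are given 100x the normal HWRM command timeout. */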
#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)

static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(*coal));

	coal->rx_coalesce_usecs = bp->rx_coal_ticks;
	/* 2 completion records per rx packet */
	coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
	coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;

	coal->tx_coalesce_usecs = bp->tx_coal_ticks;
	coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
	coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;

	return 0;
}

static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_coal_ticks = coal->rx_coalesce_usecs;
	/* 2 completion records per rx packet */
	bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
	bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;

	bp->tx_coal_ticks = coal->tx_coalesce_usecs;
	bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
	bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_coal(bp);

	return rc;
}

#define BNXT_NUM_STATS	21

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return BNXT_NUM_STATS * bp->cp_nr_rings;
	default:
		return -EOPNOTSUPP;
	}
}

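/* Copy the hardware counters from each completion ring's stats block,
 * followed by the per-ring software rx_l4_csum_errors counter.
 */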
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;

	memset(buf, 0, buf_size);

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
		int k;

		for (k = 0; k < stat_fields; j++, k++)
			buf[j] = le64_to_cpu(hw_stats[k]);
		buf[j++] = cpr->rx_l4_csum_errors;
	}
}

static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 i;

	switch (stringset) {
	/* The number of strings must match BNXT_NUM_STATS defined above. */
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			sprintf(buf, "[%d]: rx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_discards", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_drops", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_ucast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_mcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tx_bcast_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_packets", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_bytes", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_events", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: tpa_aborts", i);
			buf += ETH_GSTRING_LEN;
			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
			buf += ETH_GSTRING_LEN;
		}
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int max_rx_rings, max_tx_rings, tcs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	channel->max_combined = max_rx_rings;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false);
	tcs = netdev_get_num_tc(dev);
	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
	} else {
		channel->rx_count = bp->rx_nr_rings;
		channel->tx_count = bp->tx_nr_rings_per_tc;
	}
}

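/* Accept either a combined channel count (rx and tx share completion rings)
 * or separate rx/tx counts, but not both.  If the NIC is running, it is
 * closed and reopened with the new ring counts.
 */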
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int max_rx_rings, max_tx_rings, tcs;
	u32 rc = 0;
	bool sh = false;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1)
		max_tx_rings /= tcs;

	if (sh && (channel->combined_count > max_rx_rings ||
		   channel->combined_count > max_tx_rings))
		return -ENOMEM;

	if (!sh && (channel->rx_count > max_rx_rings ||
		    channel->tx_count > max_tx_rings))
		return -ENOMEM;

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc: %x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}

	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;

	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	bp->num_stat_ctxs = bp->cp_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * to re-enable
			 */
		}
	}

	return rc;
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.ip_proto == IPPROTO_TCP)
		fs->flow_type = TCP_V4_FLOW;
	else if (fkeys->basic.ip_proto == IPPROTO_UDP)
		fs->flow_type = UDP_V4_FLOW;
	else
		goto fltr_err;

	fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
	fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

	fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
	fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

	fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
	fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

	fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
	fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}

static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}
#endif

static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int i = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
			indir[i] = le16_to_cpu(vnic->rss_table[i]);

	if (key)
		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);
	char *pkglog;
	char *pkgver = NULL;

	pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
	if (pkglog)
		pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	if (pkgver && *pkgver != 0 && isdigit(*pkgver))
		snprintf(info->fw_version, sizeof(info->fw_version) - 1,
			 "%s pkg %s", bp->fw_ver_str, pkgver);
	else
		strlcpy(info->fw_version, bp->fw_ver_str,
			sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
	info->testinfo_len = BNXT_NUM_TESTS(bp);
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
	kfree(pkglog);
}

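/* Translate a firmware link-speed mask plus pause bits into the
 * corresponding ethtool ADVERTISED_* mask.
 */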
static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
	u32 speed_mask = 0;

	/* TODO: support 25GB, 40GB, 50GB with different cable type */
	/* set the advertised speeds */
	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		speed_mask |= ADVERTISED_100baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		speed_mask |= ADVERTISED_1000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		speed_mask |= ADVERTISED_2500baseX_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		speed_mask |= ADVERTISED_10000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		speed_mask |= ADVERTISED_40000baseCR4_Full;

	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
		speed_mask |= ADVERTISED_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_TX)
		speed_mask |= ADVERTISED_Asym_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_RX)
		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return speed_mask;
}

static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
{
	u16 fw_speeds = link_info->auto_link_speeds;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->auto_pause_setting;

	return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
}

static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
{
	u16 fw_speeds = link_info->lp_auto_link_speeds;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->lp_pause;

	return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
}

static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
{
	u16 fw_speeds = link_info->support_speeds;
	u32 supported;

	supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
	return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
}

u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
		return SPEED_50000;
	default:
		return SPEED_UNKNOWN;
	}
}

static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 ethtool_speed;

	cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);

	if (link_info->auto_link_speeds)
		cmd->supported |= SUPPORTED_Autoneg;

	if (link_info->autoneg) {
		cmd->advertising =
			bnxt_fw_to_ethtool_advertised_spds(link_info);
		cmd->advertising |= ADVERTISED_Autoneg;
		cmd->autoneg = AUTONEG_ENABLE;
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			cmd->lp_advertising =
				bnxt_fw_to_ethtool_lp_adv(link_info);
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->advertising = 0;
	}

	cmd->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		cmd->port = PORT_TP;
		cmd->supported |= SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			cmd->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			cmd->port = PORT_FIBRE;
	}

	if (link_info->phy_link_status == BNXT_LINK_LINK) {
		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
			cmd->duplex = DUPLEX_FULL;
	} else {
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
	ethtool_cmd_speed_set(cmd, ethtool_speed);
	if (link_info->transceiver ==
	    PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
		cmd->transceiver = XCVR_INTERNAL;
	else
		cmd->transceiver = XCVR_EXTERNAL;
	cmd->phy_address = link_info->phy_addr;

	return 0;
}

static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
{
	switch (ethtool_speed) {
	case SPEED_100:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
	case SPEED_1000:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
	case SPEED_2500:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
	case SPEED_10000:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
	case SPEED_20000:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
	case SPEED_25000:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
	case SPEED_40000:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
	case SPEED_50000:
		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
	default:
		netdev_err(dev, "unsupported speed!\n");
		break;
	}
	return 0;
}

static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
	u16 fw_speed_mask = 0;

	/* only support autoneg at speed 100, 1000, and 10000 */
	if (advertising & (ADVERTISED_100baseT_Full |
			   ADVERTISED_100baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
	}
	if (advertising & (ADVERTISED_1000baseT_Full |
			   ADVERTISED_1000baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
	}
	if (advertising & ADVERTISED_10000baseT_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (advertising & ADVERTISED_40000baseCR4_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 speed, fw_advertising = 0;
	bool set_pause = false;

	if (BNXT_VF(bp))
		return rc;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "Media type doesn't support autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
					 ADVERTISED_Autoneg |
					 ADVERTISED_TP |
					 ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause)) {
			netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
				   cmd->advertising);
			rc = -EINVAL;
			goto set_setting_exit;
		}
		fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
		if (fw_advertising & ~link_info->support_speeds) {
			netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
				   cmd->advertising);
			rc = -EINVAL;
			goto set_setting_exit;
		}
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		if (!fw_advertising)
			link_info->advertising = link_info->support_speeds;
		else
			link_info->advertising = fw_advertising;
		/* any change to autoneg will cause a link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		/* TODO: currently don't support half duplex */
		if (cmd->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		/* If we received a request for an unknown duplex, assume full */
		if (cmd->duplex == DUPLEX_UNKNOWN)
			cmd->duplex = DUPLEX_FULL;
		speed = ethtool_cmd_speed(cmd);
		link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
		link_info->autoneg = 0;
		link_info->advertising = 0;
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause);

set_setting_exit:
	return rc;
}

static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause =
		((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0);
	epause->tx_pause =
		((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0);
}

static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return rc;

	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
			return -EINVAL;

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
	} else {
		/* when transitioning from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
	else
		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
	else
		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);
	return rc;
}

static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return bp->link_info.link_up;
}

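/* Write one NVM directory entry: copy the caller's data into a DMA-coherent
 * buffer and issue HWRM_NVM_WRITE with the extended flash timeout.
 */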
static int bnxt_flash_nvram(struct net_device *dev,
			    u16 dir_type,
			    u16 dir_ordinal,
			    u16 dir_ext,
			    u16 dir_attr,
			    const u8 *data,
			    size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_write_input req = {0};
	dma_addr_t dma_handle;
	u8 *kmem;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);

	req.dir_type = cpu_to_le16(dir_type);
	req.dir_ordinal = cpu_to_le16(dir_ordinal);
	req.dir_ext = cpu_to_le16(dir_ext);
	req.dir_attr = cpu_to_le16(dir_attr);
	req.dir_data_length = cpu_to_le32(data_len);

	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
				  GFP_KERNEL);
	if (!kmem) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)data_len);
		return -ENOMEM;
	}
	memcpy(kmem, data, data_len);
	req.host_src_addr = cpu_to_le64(dma_handle);

	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);

	return rc;
}

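/* Request a reset of the embedded processor associated with the given
 * firmware directory type via HWRM_FW_RESET, so a newly flashed image
 * takes effect.
 */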
static int bnxt_firmware_reset(struct net_device *dev,
			       u16 dir_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_fw_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	default:
		return -EINVAL;
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

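/* Validate a firmware image (header signature, code type, device family and
 * trailing CRC32) before writing it to NVM and triggering a firmware reset.
 */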
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int rc = 0;
	u16 code_type;
	u32 stored_crc;
	u32 calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	/* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_unprotected_exec_format(dir_type);
}

static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware *fw;
	int rc;

	if (bnxt_dir_type_is_executable(dir_type) == false)
		return -EINVAL;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}

static int bnxt_flash_package_from_file(struct net_device *dev,
					char *filename)
{
	netdev_err(dev, "packages are not yet supported\n");
	return -EINVAL;
}

static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
		return bnxt_flash_package_from_file(dev, flash->data);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

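/* Query the number of NVM directory entries and the size of each entry
 * via HWRM_NVM_GET_DIR_INFO.
 */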
static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_get_eeprom_len(struct net_device *dev)
{
	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}

static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)buflen);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
	return rc;
}

static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			       u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input req = {0};

	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)length);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	req.dir_idx = cpu_to_le16(index);
	req.offset = cpu_to_le32(offset);
	req.len = cpu_to_le32(length);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, length);
	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
	return rc;
}

static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_find_dir_entry_input req = {0};
	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
	req.enables = 0;
	req.dir_idx = 0;
	req.dir_type = cpu_to_le16(type);
	req.dir_ordinal = cpu_to_le16(ordinal);
	req.dir_ext = cpu_to_le16(ext);
	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	return rc;
}

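/* The package log is a series of newline-terminated records with
 * tab-separated fields.  Return a pointer to the requested field of the
 * last record, or NULL if not found; the buffer is modified in place.
 */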
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char *retval = NULL;
	char *p;
	char *value;
	int field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}

static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
{
	u16 index = 0;
	u32 datalen;

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, NULL, &datalen) != 0)
		return NULL;

	memset(buf, 0, buflen);
	if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
		return NULL;

	return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
				 datalen);
}

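/* ethtool eeprom read encoding: offset 0 returns the NVM directory;
 * otherwise bits 31:24 hold the 1-based directory index and bits 23:0 the
 * byte offset within that item.
 */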
static int bnxt_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	u32 index;
	u32 offset;

	if (eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(dev, eeprom->len, data);

	index = eeprom->offset >> 24;
	offset = eeprom->offset & 0xffffff;

	if (index == 0) {
		netdev_err(dev, "unsupported index value: %d\n", index);
		return -EINVAL;
	}

	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}

static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_erase_dir_entry_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
	req.dir_idx = cpu_to_le16(index);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

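/* ethtool eeprom write encoding: magic bits 31:16 hold the directory type
 * (0xffff selects a directory operation such as erase), while the offset
 * field carries the ordinal and attributes for a normal item write.
 */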
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		if (index == 0)
			return -EINVAL;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type) == true)
		return -EINVAL;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
				eeprom->len);
}

const struct ethtool_ops bnxt_ethtool_ops = {
	.get_settings = bnxt_get_settings,
	.set_settings = bnxt_set_settings,
	.get_pauseparam = bnxt_get_pauseparam,
	.set_pauseparam = bnxt_set_pauseparam,
	.get_drvinfo = bnxt_get_drvinfo,
	.get_coalesce = bnxt_get_coalesce,
	.set_coalesce = bnxt_set_coalesce,
	.get_msglevel = bnxt_get_msglevel,
	.set_msglevel = bnxt_set_msglevel,
	.get_sset_count = bnxt_get_sset_count,
	.get_strings = bnxt_get_strings,
	.get_ethtool_stats = bnxt_get_ethtool_stats,
	.set_ringparam = bnxt_set_ringparam,
	.get_ringparam = bnxt_get_ringparam,
	.get_channels = bnxt_get_channels,
	.set_channels = bnxt_set_channels,
#ifdef CONFIG_RFS_ACCEL
	.get_rxnfc = bnxt_get_rxnfc,
#endif
	.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size = bnxt_get_rxfh_key_size,
	.get_rxfh = bnxt_get_rxfh,
	.flash_device = bnxt_flash_device,
	.get_eeprom_len = bnxt_get_eeprom_len,
	.get_eeprom = bnxt_get_eeprom,
	.set_eeprom = bnxt_set_eeprom,
	.get_link = bnxt_get_link,
};