/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"

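/* Each fm10k_stats entry describes one statistic exported via ethtool: the
 * string reported to userspace plus the size and offset of the field that
 * backs it, so the stat tables below can be walked generically.
 */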
struct fm10k_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define FM10K_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
	FM10K_NETDEV_STAT(tx_packets),
	FM10K_NETDEV_STAT(tx_bytes),
	FM10K_NETDEV_STAT(tx_errors),
	FM10K_NETDEV_STAT(rx_packets),
	FM10K_NETDEV_STAT(rx_bytes),
	FM10K_NETDEV_STAT(rx_errors),
	FM10K_NETDEV_STAT(rx_dropped),

	/* detailed Rx errors */
	FM10K_NETDEV_STAT(rx_length_errors),
	FM10K_NETDEV_STAT(rx_crc_errors),
	FM10K_NETDEV_STAT(rx_fifo_errors),
};

#define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats)

#define FM10K_STAT(_name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \
	.stat_offset = offsetof(struct fm10k_intfc, _stat) \
}

static const struct fm10k_stats fm10k_gstrings_stats[] = {
	FM10K_STAT("tx_restart_queue", restart_queue),
	FM10K_STAT("tx_busy", tx_busy),
	FM10K_STAT("tx_csum_errors", tx_csum_errors),
	FM10K_STAT("rx_alloc_failed", alloc_failed),
	FM10K_STAT("rx_csum_errors", rx_csum_errors),
	FM10K_STAT("rx_errors", rx_errors),

	FM10K_STAT("tx_packets_nic", tx_packets_nic),
	FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
	FM10K_STAT("rx_packets_nic", rx_packets_nic),
	FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
	FM10K_STAT("rx_drops_nic", rx_drops_nic),
	FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
	FM10K_STAT("rx_overrun_vf", rx_overrun_vf),

	FM10K_STAT("timeout", stats.timeout.count),
	FM10K_STAT("ur", stats.ur.count),
	FM10K_STAT("ca", stats.ca.count),
	FM10K_STAT("um", stats.um.count),
	FM10K_STAT("xec", stats.xec.count),
	FM10K_STAT("vlan_drop", stats.vlan_drop.count),
	FM10K_STAT("loopback_drop", stats.loopback_drop.count),
	FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),

	FM10K_STAT("swapi_status", hw.swapi.status),
	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),

	FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy),
	FM10K_STAT("mbx_tx_dropped", hw.mbx.tx_dropped),
	FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages),
	FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords),
	FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages),
	FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords),
	FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err),
};

#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_stats)

#define FM10K_QUEUE_STATS_LEN \
	(MAX_QUEUES * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))

#define FM10K_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
			 FM10K_NETDEV_STATS_LEN + \
			 FM10K_QUEUE_STATS_LEN)

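/* Copy the ETH_SS_STATS string table: netdev stats, interface/hardware stats,
 * then per-queue Tx/Rx packet and byte counters. The order here must match
 * the order the values are written in fm10k_get_ethtool_stats().
 */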
static void fm10k_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
			memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
			memcpy(p, fm10k_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < MAX_QUEUES; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

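/* Report how many strings/values the ETH_SS_STATS set contains */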
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return FM10K_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

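/* Gather statistic values in the same order as the strings above: netdev
 * stats first, then interface-wide counters, then per-queue ring stats.
 * Rings that are not allocated report zeroes so the layout stays fixed.
 */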
static void fm10k_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	char *p;
	int i, j;

	fm10k_update_stats(interface);

	for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
		p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset;
		*(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
		p = (char *)interface + fm10k_gstrings_stats[i].stat_offset;
		*(data++) = (fm10k_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	for (i = 0; i < MAX_QUEUES; i++) {
		struct fm10k_ring *ring;
		u64 *queue_stat;

		ring = interface->tx_ring[i];
		if (ring)
			queue_stat = (u64 *)&ring->stats;
		for (j = 0; j < stat_count; j++)
			*(data++) = ring ? queue_stat[j] : 0;

		ring = interface->rx_ring[i];
		if (ring)
			queue_stat = (u64 *)&ring->stats;
		for (j = 0; j < stat_count; j++)
			*(data++) = ring ? queue_stat[j] : 0;
	}
}

/* If function below adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_Q 29

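/* Dump the per-queue Rx and Tx descriptor ring registers for queue i */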
static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0;

	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));

	BUG_ON(idx != FM10K_REGS_LEN_Q);
}

/* If function below adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_VSI 43

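/* Dump the RSS configuration (MRQC, hash key and redirection table) for VSI i */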
static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0, j;

	buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
	for (j = 0; j < 10; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
	for (j = 0; j < 32; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));

	BUG_ON(idx != FM10K_REGS_LEN_VSI);
}

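/* Fill the ethtool register dump; only the PF MAC type is supported, and
 * fm10k_get_regs_len() below reports a zero length for anything else.
 */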
static void fm10k_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u32 *buff = p;
	u16 i;

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		/* General PF Registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);

		for (i = 0; i < 8; i++) {
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
		}

		for (i = 0; i < 65; i++) {
			fm10k_get_reg_vsi(hw, buff, i);
			buff += FM10K_REGS_LEN_VSI;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);

		for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
			fm10k_get_reg_q(hw, buff, i);
			buff += FM10K_REGS_LEN_Q;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);

		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));

		/* Interrupt Throttling Registers */
		for (i = 0; i < 130; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));

		break;
	default:
		return;
	}
}

/* If function above adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_PF \
(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))

static int fm10k_get_regs_len(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		return FM10K_REGS_LEN_PF * sizeof(u32);
	default:
		return 0;
	}
}

static void fm10k_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	strncpy(info->driver, fm10k_driver_name,
		sizeof(info->driver) - 1);
	strncpy(info->version, fm10k_driver_version,
		sizeof(info->version) - 1);
	strncpy(info->bus_info, pci_name(interface->pdev),
		sizeof(info->bus_info) - 1);

	info->n_stats = FM10K_STATS_LEN;

	info->regdump_len = fm10k_get_regs_len(dev);
}

static void fm10k_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* record fixed values for autoneg and tx pause */
	pause->autoneg = 0;
	pause->tx_pause = 1;

	pause->rx_pause = interface->rx_pause ? 1 : 0;
}

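/* Pause autonegotiation is not supported and Tx pause is always on; only
 * Rx pause is configurable, and only on the PF.
 */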
static int fm10k_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	if (pause->autoneg || !pause->tx_pause)
		return -EINVAL;

	/* we can only support pause on the PF to avoid head-of-line blocking */
	if (hw->mac.type == fm10k_mac_pf)
		interface->rx_pause = pause->rx_pause ? ~0 : 0;
	else if (pause->rx_pause)
		return -EINVAL;

	if (netif_running(dev))
		fm10k_update_rx_drop_en(interface);

	return 0;
}

static u32 fm10k_get_msglevel(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	return interface->msg_enable;
}

static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	interface->msg_enable = data;
}

static void fm10k_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	ring->rx_max_pending = FM10K_MAX_RXD;
	ring->tx_max_pending = FM10K_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = interface->rx_ring_count;
	ring->tx_pending = interface->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

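/* Resize the descriptor rings. The requested counts are clamped and aligned
 * to the hardware descriptor multiples; if the interface is running, new
 * resources are allocated before the old ones are freed so a failed
 * allocation leaves the original rings intact.
 */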
static int fm10k_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       FM10K_MIN_TXD, FM10K_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       FM10K_MIN_RXD, FM10K_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == interface->tx_ring_count) &&
	    (new_rx_count == interface->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
		usleep_range(1000, 2000);

	if (!netif_running(interface->netdev)) {
		for (i = 0; i < interface->num_tx_queues; i++)
			interface->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < interface->num_rx_queues; i++)
			interface->rx_ring[i]->count = new_rx_count;
		interface->tx_ring_count = new_tx_count;
		interface->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct fm10k_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	fm10k_down(interface);

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != interface->tx_ring_count) {
		for (i = 0; i < interface->num_tx_queues; i++) {
			memcpy(&temp_ring[i], interface->tx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_tx_count;
			err = fm10k_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_tx_queues; i++) {
			fm10k_free_tx_resources(interface->tx_ring[i]);

			memcpy(interface->tx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != interface->rx_ring_count) {
		for (i = 0; i < interface->num_rx_queues; i++) {
			memcpy(&temp_ring[i], interface->rx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_rx_count;
			err = fm10k_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_rx_queues; i++) {
			fm10k_free_rx_resources(interface->rx_ring[i]);

			memcpy(interface->rx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->rx_ring_count = new_rx_count;
	}

err_setup:
	fm10k_up(interface);
	vfree(temp_ring);
clear_reset:
	clear_bit(__FM10K_RESETTING, &interface->state);
	return err;
}

static int fm10k_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	ec->use_adaptive_tx_coalesce =
		!!(interface->tx_itr & FM10K_ITR_ADAPTIVE);
	ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;

	ec->use_adaptive_rx_coalesce =
		!!(interface->rx_itr & FM10K_ITR_ADAPTIVE);
	ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;

	return 0;
}

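/* Apply interrupt coalescing settings, either a fixed usec value or the
 * adaptive ITR starting points, to the interface and every q_vector.
 */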
static int fm10k_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_q_vector *qv;
	u16 tx_itr, rx_itr;
	int i;

	/* verify limits */
	if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
	    (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
		return -EINVAL;

	/* record settings */
	tx_itr = ec->tx_coalesce_usecs;
	rx_itr = ec->rx_coalesce_usecs;

	/* set initial values for adaptive ITR */
	if (ec->use_adaptive_tx_coalesce)
		tx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_10K;

	if (ec->use_adaptive_rx_coalesce)
		rx_itr = FM10K_ITR_ADAPTIVE | FM10K_ITR_20K;

	/* update interface */
	interface->tx_itr = tx_itr;
	interface->rx_itr = rx_itr;

	/* update q_vectors */
	for (i = 0; i < interface->num_q_vectors; i++) {
		qv = interface->q_vector[i];
		qv->tx.itr = tx_itr;
		qv->rx.itr = rx_itr;
	}

	return 0;
}

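/* Report which header fields feed the RSS hash for each flow type */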
static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on fm10k */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case UDP_V4_FLOW:
		if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (interface->flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = interface->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fm10k_get_rss_hash_opts(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

#define UDP_RSS_FLAGS (FM10K_FLAG_RSS_FIELD_IPV4_UDP | \
		       FM10K_FLAG_RSS_FIELD_IPV6_UDP)
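/* Update the RSS field selection. Only the UDP L4 port contribution is
 * actually configurable; IP src/dst hashing is mandatory and TCP port
 * hashing cannot be disabled. Changes are pushed to the MRQC register.
 */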
static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags = interface->flags;

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~FM10K_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= FM10K_FLAG_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags &= ~FM10K_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags |= FM10K_FLAG_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags != interface->flags) {
		struct fm10k_hw *hw = &interface->hw;
		u32 mrqc;

		if ((flags & UDP_RSS_FLAGS) &&
		    !(interface->flags & UDP_RSS_FLAGS))
			netif_warn(interface, drv, interface->netdev,
				   "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		interface->flags = flags;

		/* Perform hash on these packet types */
		mrqc = FM10K_MRQC_IPV4 |
		       FM10K_MRQC_TCP_IPV4 |
		       FM10K_MRQC_IPV6 |
		       FM10K_MRQC_TCP_IPV6;

		if (flags & FM10K_FLAG_RSS_FIELD_IPV4_UDP)
			mrqc |= FM10K_MRQC_UDP_IPV4;
		if (flags & FM10K_FLAG_RSS_FIELD_IPV6_UDP)
			mrqc |= FM10K_MRQC_UDP_IPV6;

		fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
	}

	return 0;
}

static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = fm10k_set_rss_hash_opt(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static u32 fm10k_get_reta_size(struct net_device *netdev)
{
	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}

static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	if (!indir)
		return 0;

	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = interface->reta[i];

		indir[0] = (reta << 24) >> 24;
		indir[1] = (reta << 16) >> 24;
		indir[2] = (reta << 8) >> 24;
		indir[3] = (reta) >> 24;
	}

	return 0;
}

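/* Validate and store the RSS redirection table, writing only the 32-bit
 * RETA registers whose packed value actually changed.
 */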
static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i;
	u16 rss_i;

	if (!indir)
		return 0;

	/* Verify user input. */
	rss_i = interface->ring_feature[RING_F_RSS].indices;
	for (i = fm10k_get_reta_size(netdev); i--;) {
		if (indir[i] < rss_i)
			continue;
		return -EINVAL;
	}

	/* record entries to reta table */
	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = indir[0] |
			   (indir[1] << 8) |
			   (indir[2] << 16) |
			   (indir[3] << 24);

		if (interface->reta[i] == reta)
			continue;

		interface->reta[i] = reta;
		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
	}

	return 0;
}

static u32 fm10k_get_rssrk_size(struct net_device *netdev)
{
	return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}

static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i, err;

	err = fm10k_get_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
		*(__le32 *)key = cpu_to_le32(interface->rssrk[i]);

	return 0;
}

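/* Update the redirection table and RSS hash key, writing only the RSSRK
 * registers whose value changed.
 */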
static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
			  const u8 *key)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i, err;

	err = fm10k_set_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
		u32 rssrk = le32_to_cpu(*(__le32 *)key);

		if (interface->rssrk[i] == rssrk)
			continue;

		interface->rssrk[i] = rssrk;
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
	}

	return 0;
}

static const struct ethtool_ops fm10k_ethtool_ops = {
	.get_strings		= fm10k_get_strings,
	.get_sset_count		= fm10k_get_sset_count,
	.get_ethtool_stats	= fm10k_get_ethtool_stats,
	.get_drvinfo		= fm10k_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= fm10k_get_pauseparam,
	.set_pauseparam		= fm10k_set_pauseparam,
	.get_msglevel		= fm10k_get_msglevel,
	.set_msglevel		= fm10k_set_msglevel,
	.get_ringparam		= fm10k_get_ringparam,
	.set_ringparam		= fm10k_set_ringparam,
	.get_coalesce		= fm10k_get_coalesce,
	.set_coalesce		= fm10k_set_coalesce,
	.get_rxnfc		= fm10k_get_rxnfc,
	.set_rxnfc		= fm10k_set_rxnfc,
	.get_regs		= fm10k_get_regs,
	.get_regs_len		= fm10k_get_regs_len,
	.get_rxfh_indir_size	= fm10k_get_reta_size,
	.get_rxfh_key_size	= fm10k_get_rssrk_size,
	.get_rxfh		= fm10k_get_rssh,
	.set_rxfh		= fm10k_set_rssh,
};

void fm10k_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fm10k_ethtool_ops;
}