Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1 | /**************************************************************************** |
| 2 | * Driver for Solarflare network controllers and boards |
| 3 | * Copyright 2012-2013 Solarflare Communications Inc. |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 as published |
| 7 | * by the Free Software Foundation, incorporated herein by reference. |
| 8 | */ |
| 9 | |
| 10 | #include "net_driver.h" |
| 11 | #include "ef10_regs.h" |
| 12 | #include "io.h" |
| 13 | #include "mcdi.h" |
| 14 | #include "mcdi_pcol.h" |
| 15 | #include "nic.h" |
| 16 | #include "workarounds.h" |
Jon Cooper | 74cd60a | 2013-09-16 14:18:51 +0100 | [diff] [blame] | 17 | #include "selftest.h" |
Shradha Shah | 7fa8d54 | 2015-05-06 00:55:13 +0100 | [diff] [blame] | 18 | #include "ef10_sriov.h" |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 19 | #include <linux/in.h> |
| 20 | #include <linux/jhash.h> |
| 21 | #include <linux/wait.h> |
| 22 | #include <linux/workqueue.h> |
| 23 | |
| 24 | /* Hardware control for EF10 architecture including 'Huntington'. */ |
| 25 | |
| 26 | #define EFX_EF10_DRVGEN_EV 7 |
| 27 | enum { |
| 28 | EFX_EF10_TEST = 1, |
| 29 | EFX_EF10_REFILL, |
| 30 | }; |
| 31 | |
| 32 | /* The reserved RSS context value */ |
| 33 | #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 34 | /* The maximum size of a shared RSS context */ |
| 35 | /* TODO: this should really be from the mcdi protocol export */ |
| 36 | #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 37 | |
| 38 | /* The filter table(s) are managed by firmware and we have write-only |
| 39 | * access. When removing filters we must identify them to the |
| 40 | * firmware by a 64-bit handle, but this is too wide for Linux kernel |
| 41 | * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to |
| 42 | * be able to tell in advance whether a requested insertion will |
| 43 | * replace an existing filter. Therefore we maintain a software hash |
| 44 | * table, which should be at least as large as the hardware hash |
| 45 | * table. |
| 46 | * |
| 47 | * Huntington has a single 8K filter table shared between all filter |
| 48 | * types and both ports. |
| 49 | */ |
| 50 | #define HUNT_FILTER_TBL_ROWS 8192 |
| 51 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 52 | #define EFX_EF10_FILTER_ID_INVALID 0xffff |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 53 | struct efx_ef10_dev_addr { |
| 54 | u8 addr[ETH_ALEN]; |
| 55 | u16 id; |
| 56 | }; |
| 57 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 58 | struct efx_ef10_filter_table { |
| 59 | /* The RX match field masks supported by this fw & hw, in order of priority */ |
| 60 | enum efx_filter_match_flags rx_match_flags[ |
| 61 | MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; |
| 62 | unsigned int rx_match_count; |
| 63 | |
| 64 | struct { |
| 65 | unsigned long spec; /* pointer to spec plus flag bits */ |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 66 | /* BUSY flag indicates that an update is in progress. AUTO_OLD is |
| 67 | * used to mark and sweep MAC filters for the device address lists. |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 68 | */ |
| 69 | #define EFX_EF10_FILTER_FLAG_BUSY 1UL |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 70 | #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 71 | #define EFX_EF10_FILTER_FLAGS 3UL |
| 72 | u64 handle; /* firmware handle */ |
| 73 | } *entry; |
| 74 | wait_queue_head_t waitq; |
| 75 | /* Shadow of net_device address lists, guarded by mac_lock */ |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 76 | #define EFX_EF10_FILTER_DEV_UC_MAX 32 |
| 77 | #define EFX_EF10_FILTER_DEV_MC_MAX 256 |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 78 | struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; |
| 79 | struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 80 | int dev_uc_count; |
| 81 | int dev_mc_count; |
| 82 | /* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */ |
| 83 | u16 ucdef_id; |
| 84 | u16 bcast_id; |
| 85 | u16 mcdef_id; |
Andrew Rybchenko | b071c3a | 2016-06-15 17:43:00 +0100 | [diff] [blame] | 86 | /* Whether in multicast promiscuous mode when last changed */ |
| 87 | bool mc_promisc_last; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 88 | }; |
| 89 | |
| 90 | /* An arbitrary search limit for the software hash table */ |
| 91 | #define EFX_EF10_FILTER_SEARCH_LIMIT 200 |
| 92 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 93 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); |
| 94 | static void efx_ef10_filter_table_remove(struct efx_nic *efx); |
| 95 | |
| 96 | static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) |
| 97 | { |
| 98 | efx_dword_t reg; |
| 99 | |
| 100 | efx_readd(efx, ®, ER_DZ_BIU_MC_SFT_STATUS); |
| 101 | return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? |
| 102 | EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; |
| 103 | } |
| 104 | |
| 105 | static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) |
| 106 | { |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 107 | int bar; |
| 108 | |
| 109 | bar = efx->type->mem_bar; |
| 110 | return resource_size(&efx->pci_dev->resource[bar]); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 111 | } |
| 112 | |
Daniel Pieczko | 7a186f4 | 2015-07-07 11:37:19 +0100 | [diff] [blame] | 113 | static bool efx_ef10_is_vf(struct efx_nic *efx) |
| 114 | { |
| 115 | return efx->type->is_vf; |
| 116 | } |
| 117 | |
Daniel Pieczko | 1cd9ecb | 2015-05-06 00:57:53 +0100 | [diff] [blame] | 118 | static int efx_ef10_get_pf_index(struct efx_nic *efx) |
| 119 | { |
| 120 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); |
| 121 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 122 | size_t outlen; |
| 123 | int rc; |
| 124 | |
| 125 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, |
| 126 | sizeof(outbuf), &outlen); |
| 127 | if (rc) |
| 128 | return rc; |
| 129 | if (outlen < sizeof(outbuf)) |
| 130 | return -EIO; |
| 131 | |
| 132 | nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF); |
| 133 | return 0; |
| 134 | } |
| 135 | |
Shradha Shah | 88a37de | 2015-05-20 11:09:15 +0100 | [diff] [blame] | 136 | #ifdef CONFIG_SFC_SRIOV |
| 137 | static int efx_ef10_get_vf_index(struct efx_nic *efx) |
| 138 | { |
| 139 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); |
| 140 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 141 | size_t outlen; |
| 142 | int rc; |
| 143 | |
| 144 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, |
| 145 | sizeof(outbuf), &outlen); |
| 146 | if (rc) |
| 147 | return rc; |
| 148 | if (outlen < sizeof(outbuf)) |
| 149 | return -EIO; |
| 150 | |
| 151 | nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); |
| 152 | return 0; |
| 153 | } |
| 154 | #endif |
| 155 | |
Ben Hutchings | e5a2538 | 2013-09-05 22:50:59 +0100 | [diff] [blame] | 156 | static int efx_ef10_init_datapath_caps(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 157 | { |
| 158 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN); |
| 159 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 160 | size_t outlen; |
| 161 | int rc; |
| 162 | |
| 163 | BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); |
| 164 | |
| 165 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, |
| 166 | outbuf, sizeof(outbuf), &outlen); |
| 167 | if (rc) |
| 168 | return rc; |
Ben Hutchings | e5a2538 | 2013-09-05 22:50:59 +0100 | [diff] [blame] | 169 | if (outlen < sizeof(outbuf)) { |
| 170 | netif_err(efx, drv, efx->net_dev, |
| 171 | "unable to read datapath firmware capabilities\n"); |
| 172 | return -EIO; |
| 173 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 174 | |
Ben Hutchings | e5a2538 | 2013-09-05 22:50:59 +0100 | [diff] [blame] | 175 | nic_data->datapath_caps = |
| 176 | MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); |
| 177 | |
Daniel Pieczko | 8d9f9dd | 2015-05-06 00:56:55 +0100 | [diff] [blame] | 178 | /* record the DPCPU firmware IDs to determine VEB vswitching support. |
| 179 | */ |
| 180 | nic_data->rx_dpcpu_fw_id = |
| 181 | MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); |
| 182 | nic_data->tx_dpcpu_fw_id = |
| 183 | MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); |
| 184 | |
Ben Hutchings | e5a2538 | 2013-09-05 22:50:59 +0100 | [diff] [blame] | 185 | if (!(nic_data->datapath_caps & |
Ben Hutchings | e5a2538 | 2013-09-05 22:50:59 +0100 | [diff] [blame] | 186 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { |
| 187 | netif_err(efx, probe, efx->net_dev, |
| 188 | "current firmware does not support an RX prefix\n"); |
| 189 | return -ENODEV; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 190 | } |
| 191 | |
| 192 | return 0; |
| 193 | } |
| 194 | |
| 195 | static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) |
| 196 | { |
| 197 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); |
| 198 | int rc; |
| 199 | |
| 200 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, |
| 201 | outbuf, sizeof(outbuf), NULL); |
| 202 | if (rc) |
| 203 | return rc; |
| 204 | rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); |
| 205 | return rc > 0 ? rc : -ERANGE; |
| 206 | } |
| 207 | |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 208 | static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 209 | { |
| 210 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); |
| 211 | size_t outlen; |
| 212 | int rc; |
| 213 | |
| 214 | BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); |
| 215 | |
| 216 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, |
| 217 | outbuf, sizeof(outbuf), &outlen); |
| 218 | if (rc) |
| 219 | return rc; |
| 220 | if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) |
| 221 | return -EIO; |
| 222 | |
Edward Cree | cd84ff4 | 2014-03-07 18:27:41 +0000 | [diff] [blame] | 223 | ether_addr_copy(mac_address, |
| 224 | MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 225 | return 0; |
| 226 | } |
| 227 | |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 228 | static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) |
| 229 | { |
| 230 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); |
| 231 | MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); |
| 232 | size_t outlen; |
| 233 | int num_addrs, rc; |
| 234 | |
| 235 | MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, |
| 236 | EVB_PORT_ID_ASSIGNED); |
| 237 | rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, |
| 238 | sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); |
| 239 | |
| 240 | if (rc) |
| 241 | return rc; |
| 242 | if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) |
| 243 | return -EIO; |
| 244 | |
| 245 | num_addrs = MCDI_DWORD(outbuf, |
| 246 | VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); |
| 247 | |
| 248 | WARN_ON(num_addrs != 1); |
| 249 | |
| 250 | ether_addr_copy(mac_address, |
| 251 | MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); |
| 252 | |
| 253 | return 0; |
| 254 | } |
| 255 | |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 256 | static ssize_t efx_ef10_show_link_control_flag(struct device *dev, |
| 257 | struct device_attribute *attr, |
| 258 | char *buf) |
| 259 | { |
| 260 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
| 261 | |
| 262 | return sprintf(buf, "%d\n", |
| 263 | ((efx->mcdi->fn_flags) & |
| 264 | (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) |
| 265 | ? 1 : 0); |
| 266 | } |
| 267 | |
| 268 | static ssize_t efx_ef10_show_primary_flag(struct device *dev, |
| 269 | struct device_attribute *attr, |
| 270 | char *buf) |
| 271 | { |
| 272 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
| 273 | |
| 274 | return sprintf(buf, "%d\n", |
| 275 | ((efx->mcdi->fn_flags) & |
| 276 | (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) |
| 277 | ? 1 : 0); |
| 278 | } |
| 279 | |
| 280 | static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, |
| 281 | NULL); |
| 282 | static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); |
| 283 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 284 | static int efx_ef10_probe(struct efx_nic *efx) |
| 285 | { |
| 286 | struct efx_ef10_nic_data *nic_data; |
Shradha Shah | 8be4132 | 2015-06-02 11:37:25 +0100 | [diff] [blame] | 287 | struct net_device *net_dev = efx->net_dev; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 288 | int i, rc; |
| 289 | |
Ben Hutchings | aa3930e | 2014-02-12 18:59:19 +0000 | [diff] [blame] | 290 | /* We can have one VI for each 8K region. However, until we |
| 291 | * use TX option descriptors we need two TX queues per channel. |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 292 | */ |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 293 | efx->max_channels = min_t(unsigned int, |
| 294 | EFX_MAX_CHANNELS, |
| 295 | efx_ef10_mem_map_size(efx) / |
| 296 | (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); |
| 297 | efx->max_tx_channels = efx->max_channels; |
Edward Cree | 9fd3d3a | 2014-11-03 14:14:35 +0000 | [diff] [blame] | 298 | if (WARN_ON(efx->max_channels == 0)) |
| 299 | return -EIO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 300 | |
| 301 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); |
| 302 | if (!nic_data) |
| 303 | return -ENOMEM; |
| 304 | efx->nic_data = nic_data; |
| 305 | |
Edward Cree | 75aba2a | 2015-05-27 13:13:54 +0100 | [diff] [blame] | 306 | /* we assume later that we can copy from this buffer in dwords */ |
| 307 | BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); |
| 308 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 309 | rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, |
| 310 | 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); |
| 311 | if (rc) |
| 312 | goto fail1; |
| 313 | |
| 314 | /* Get the MC's warm boot count. In case it's rebooting right |
| 315 | * now, be prepared to retry. |
| 316 | */ |
| 317 | i = 0; |
| 318 | for (;;) { |
| 319 | rc = efx_ef10_get_warm_boot_count(efx); |
| 320 | if (rc >= 0) |
| 321 | break; |
| 322 | if (++i == 5) |
| 323 | goto fail2; |
| 324 | ssleep(1); |
| 325 | } |
| 326 | nic_data->warm_boot_count = rc; |
| 327 | |
| 328 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| 329 | |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 330 | nic_data->vport_id = EVB_PORT_ID_ASSIGNED; |
| 331 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 332 | /* In case we're recovering from a crash (kexec), we want to |
| 333 | * cancel any outstanding request by the previous user of this |
| 334 | * function. We send a special message using the least |
| 335 | * significant bits of the 'high' (doorbell) register. |
| 336 | */ |
| 337 | _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); |
| 338 | |
| 339 | rc = efx_mcdi_init(efx); |
| 340 | if (rc) |
| 341 | goto fail2; |
| 342 | |
| 343 | /* Reset (most) configuration for this function */ |
| 344 | rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); |
| 345 | if (rc) |
| 346 | goto fail3; |
| 347 | |
| 348 | /* Enable event logging */ |
| 349 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); |
| 350 | if (rc) |
| 351 | goto fail3; |
| 352 | |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 353 | rc = device_create_file(&efx->pci_dev->dev, |
| 354 | &dev_attr_link_control_flag); |
Daniel Pieczko | 1cd9ecb | 2015-05-06 00:57:53 +0100 | [diff] [blame] | 355 | if (rc) |
| 356 | goto fail3; |
| 357 | |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 358 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); |
| 359 | if (rc) |
| 360 | goto fail4; |
| 361 | |
| 362 | rc = efx_ef10_get_pf_index(efx); |
| 363 | if (rc) |
| 364 | goto fail5; |
| 365 | |
Ben Hutchings | e5a2538 | 2013-09-05 22:50:59 +0100 | [diff] [blame] | 366 | rc = efx_ef10_init_datapath_caps(efx); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 367 | if (rc < 0) |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 368 | goto fail5; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 369 | |
| 370 | efx->rx_packet_len_offset = |
| 371 | ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; |
| 372 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 373 | rc = efx_mcdi_port_get_number(efx); |
| 374 | if (rc < 0) |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 375 | goto fail5; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 376 | efx->port_num = rc; |
Shradha Shah | 8be4132 | 2015-06-02 11:37:25 +0100 | [diff] [blame] | 377 | net_dev->dev_port = rc; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 378 | |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 379 | rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 380 | if (rc) |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 381 | goto fail5; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 382 | |
| 383 | rc = efx_ef10_get_sysclk_freq(efx); |
| 384 | if (rc < 0) |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 385 | goto fail5; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 386 | efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ |
| 387 | |
Edward Cree | 267d9d7 | 2015-05-06 00:59:18 +0100 | [diff] [blame] | 388 | /* Check whether firmware supports bug 35388 workaround. |
| 389 | * First try to enable it, then if we get EPERM, just |
| 390 | * ask if it's already enabled |
| 391 | */ |
Daniel Pieczko | 34ccfe6 | 2015-07-21 15:09:43 +0100 | [diff] [blame] | 392 | rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL); |
Shradha Shah | c9012e0 | 2015-06-02 11:37:41 +0100 | [diff] [blame] | 393 | if (rc == 0) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 394 | nic_data->workaround_35388 = true; |
Shradha Shah | c9012e0 | 2015-06-02 11:37:41 +0100 | [diff] [blame] | 395 | } else if (rc == -EPERM) { |
Edward Cree | 267d9d7 | 2015-05-06 00:59:18 +0100 | [diff] [blame] | 396 | unsigned int enabled; |
| 397 | |
| 398 | rc = efx_mcdi_get_workarounds(efx, NULL, &enabled); |
| 399 | if (rc) |
| 400 | goto fail3; |
| 401 | nic_data->workaround_35388 = enabled & |
| 402 | MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; |
Shradha Shah | c9012e0 | 2015-06-02 11:37:41 +0100 | [diff] [blame] | 403 | } else if (rc != -ENOSYS && rc != -ENOENT) { |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 404 | goto fail5; |
Shradha Shah | c9012e0 | 2015-06-02 11:37:41 +0100 | [diff] [blame] | 405 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 406 | netif_dbg(efx, probe, efx->net_dev, |
| 407 | "workaround for bug 35388 is %sabled\n", |
| 408 | nic_data->workaround_35388 ? "en" : "dis"); |
| 409 | |
| 410 | rc = efx_mcdi_mon_probe(efx); |
Edward Cree | 267d9d7 | 2015-05-06 00:59:18 +0100 | [diff] [blame] | 411 | if (rc && rc != -EPERM) |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 412 | goto fail5; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 413 | |
Ben Hutchings | 9aecda9 | 2013-12-05 21:28:42 +0000 | [diff] [blame] | 414 | efx_ptp_probe(efx, NULL); |
| 415 | |
Shradha Shah | 1d051e0 | 2015-06-02 11:38:16 +0100 | [diff] [blame] | 416 | #ifdef CONFIG_SFC_SRIOV |
| 417 | if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { |
| 418 | struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; |
| 419 | struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); |
| 420 | |
| 421 | efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); |
| 422 | } else |
| 423 | #endif |
| 424 | ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); |
| 425 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 426 | return 0; |
| 427 | |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 428 | fail5: |
| 429 | device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); |
| 430 | fail4: |
| 431 | device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 432 | fail3: |
| 433 | efx_mcdi_fini(efx); |
| 434 | fail2: |
| 435 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); |
| 436 | fail1: |
| 437 | kfree(nic_data); |
| 438 | efx->nic_data = NULL; |
| 439 | return rc; |
| 440 | } |
| 441 | |
| 442 | static int efx_ef10_free_vis(struct efx_nic *efx) |
| 443 | { |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 444 | MCDI_DECLARE_BUF_ERR(outbuf); |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 445 | size_t outlen; |
| 446 | int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, |
| 447 | outbuf, sizeof(outbuf), &outlen); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 448 | |
| 449 | /* -EALREADY means nothing to free, so ignore */ |
| 450 | if (rc == -EALREADY) |
| 451 | rc = 0; |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 452 | if (rc) |
| 453 | efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, |
| 454 | rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 455 | return rc; |
| 456 | } |
| 457 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 458 | #ifdef EFX_USE_PIO |
| 459 | |
| 460 | static void efx_ef10_free_piobufs(struct efx_nic *efx) |
| 461 | { |
| 462 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 463 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); |
| 464 | unsigned int i; |
| 465 | int rc; |
| 466 | |
| 467 | BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); |
| 468 | |
| 469 | for (i = 0; i < nic_data->n_piobufs; i++) { |
| 470 | MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, |
| 471 | nic_data->piobuf_handle[i]); |
| 472 | rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), |
| 473 | NULL, 0, NULL); |
| 474 | WARN_ON(rc); |
| 475 | } |
| 476 | |
| 477 | nic_data->n_piobufs = 0; |
| 478 | } |
| 479 | |
| 480 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) |
| 481 | { |
| 482 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 483 | MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); |
| 484 | unsigned int i; |
| 485 | size_t outlen; |
| 486 | int rc = 0; |
| 487 | |
| 488 | BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); |
| 489 | |
| 490 | for (i = 0; i < n; i++) { |
Bert Kenward | 09a0420 | 2015-12-23 08:58:15 +0000 | [diff] [blame] | 491 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, |
| 492 | outbuf, sizeof(outbuf), &outlen); |
| 493 | if (rc) { |
| 494 | /* Don't display the MC error if we didn't have space |
| 495 | * for a VF. |
| 496 | */ |
| 497 | if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) |
| 498 | efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, |
| 499 | 0, outbuf, outlen, rc); |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 500 | break; |
Bert Kenward | 09a0420 | 2015-12-23 08:58:15 +0000 | [diff] [blame] | 501 | } |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 502 | if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { |
| 503 | rc = -EIO; |
| 504 | break; |
| 505 | } |
| 506 | nic_data->piobuf_handle[i] = |
| 507 | MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); |
| 508 | netif_dbg(efx, probe, efx->net_dev, |
| 509 | "allocated PIO buffer %u handle %x\n", i, |
| 510 | nic_data->piobuf_handle[i]); |
| 511 | } |
| 512 | |
| 513 | nic_data->n_piobufs = i; |
| 514 | if (rc) |
| 515 | efx_ef10_free_piobufs(efx); |
| 516 | return rc; |
| 517 | } |
| 518 | |
| 519 | static int efx_ef10_link_piobufs(struct efx_nic *efx) |
| 520 | { |
| 521 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 522 | _MCDI_DECLARE_BUF(inbuf, |
| 523 | max(MC_CMD_LINK_PIOBUF_IN_LEN, |
| 524 | MC_CMD_UNLINK_PIOBUF_IN_LEN)); |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 525 | struct efx_channel *channel; |
| 526 | struct efx_tx_queue *tx_queue; |
| 527 | unsigned int offset, index; |
| 528 | int rc; |
| 529 | |
| 530 | BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); |
| 531 | BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); |
| 532 | |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 533 | memset(inbuf, 0, sizeof(inbuf)); |
| 534 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 535 | /* Link a buffer to each VI in the write-combining mapping */ |
| 536 | for (index = 0; index < nic_data->n_piobufs; ++index) { |
| 537 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, |
| 538 | nic_data->piobuf_handle[index]); |
| 539 | MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, |
| 540 | nic_data->pio_write_vi_base + index); |
| 541 | rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, |
| 542 | inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, |
| 543 | NULL, 0, NULL); |
| 544 | if (rc) { |
| 545 | netif_err(efx, drv, efx->net_dev, |
| 546 | "failed to link VI %u to PIO buffer %u (%d)\n", |
| 547 | nic_data->pio_write_vi_base + index, index, |
| 548 | rc); |
| 549 | goto fail; |
| 550 | } |
| 551 | netif_dbg(efx, probe, efx->net_dev, |
| 552 | "linked VI %u to PIO buffer %u\n", |
| 553 | nic_data->pio_write_vi_base + index, index); |
| 554 | } |
| 555 | |
| 556 | /* Link a buffer to each TX queue */ |
| 557 | efx_for_each_channel(channel, efx) { |
| 558 | efx_for_each_channel_tx_queue(tx_queue, channel) { |
| 559 | /* We assign the PIO buffers to queues in |
| 560 | * reverse order to allow for the following |
| 561 | * special case. |
| 562 | */ |
| 563 | offset = ((efx->tx_channel_offset + efx->n_tx_channels - |
| 564 | tx_queue->channel->channel - 1) * |
| 565 | efx_piobuf_size); |
| 566 | index = offset / ER_DZ_TX_PIOBUF_SIZE; |
| 567 | offset = offset % ER_DZ_TX_PIOBUF_SIZE; |
| 568 | |
| 569 | /* When the host page size is 4K, the first |
| 570 | * host page in the WC mapping may be within |
| 571 | * the same VI page as the last TX queue. We |
| 572 | * can only link one buffer to each VI. |
| 573 | */ |
| 574 | if (tx_queue->queue == nic_data->pio_write_vi_base) { |
| 575 | BUG_ON(index != 0); |
| 576 | rc = 0; |
| 577 | } else { |
| 578 | MCDI_SET_DWORD(inbuf, |
| 579 | LINK_PIOBUF_IN_PIOBUF_HANDLE, |
| 580 | nic_data->piobuf_handle[index]); |
| 581 | MCDI_SET_DWORD(inbuf, |
| 582 | LINK_PIOBUF_IN_TXQ_INSTANCE, |
| 583 | tx_queue->queue); |
| 584 | rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, |
| 585 | inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, |
| 586 | NULL, 0, NULL); |
| 587 | } |
| 588 | |
| 589 | if (rc) { |
| 590 | /* This is non-fatal; the TX path just |
| 591 | * won't use PIO for this queue |
| 592 | */ |
| 593 | netif_err(efx, drv, efx->net_dev, |
| 594 | "failed to link VI %u to PIO buffer %u (%d)\n", |
| 595 | tx_queue->queue, index, rc); |
| 596 | tx_queue->piobuf = NULL; |
| 597 | } else { |
| 598 | tx_queue->piobuf = |
| 599 | nic_data->pio_write_base + |
| 600 | index * EFX_VI_PAGE_SIZE + offset; |
| 601 | tx_queue->piobuf_offset = offset; |
| 602 | netif_dbg(efx, probe, efx->net_dev, |
| 603 | "linked VI %u to PIO buffer %u offset %x addr %p\n", |
| 604 | tx_queue->queue, index, |
| 605 | tx_queue->piobuf_offset, |
| 606 | tx_queue->piobuf); |
| 607 | } |
| 608 | } |
| 609 | } |
| 610 | |
| 611 | return 0; |
| 612 | |
| 613 | fail: |
| 614 | while (index--) { |
| 615 | MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, |
| 616 | nic_data->pio_write_vi_base + index); |
| 617 | efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, |
| 618 | inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, |
| 619 | NULL, 0, NULL); |
| 620 | } |
| 621 | return rc; |
| 622 | } |
| 623 | |
Edward Cree | c0795bf | 2016-05-24 18:53:36 +0100 | [diff] [blame] | 624 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) |
| 625 | { |
| 626 | struct efx_channel *channel; |
| 627 | struct efx_tx_queue *tx_queue; |
| 628 | |
| 629 | /* All our existing PIO buffers went away */ |
| 630 | efx_for_each_channel(channel, efx) |
| 631 | efx_for_each_channel_tx_queue(tx_queue, channel) |
| 632 | tx_queue->piobuf = NULL; |
| 633 | } |
| 634 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 635 | #else /* !EFX_USE_PIO */ |
| 636 | |
| 637 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) |
| 638 | { |
| 639 | return n == 0 ? 0 : -ENOBUFS; |
| 640 | } |
| 641 | |
| 642 | static int efx_ef10_link_piobufs(struct efx_nic *efx) |
| 643 | { |
| 644 | return 0; |
| 645 | } |
| 646 | |
| 647 | static void efx_ef10_free_piobufs(struct efx_nic *efx) |
| 648 | { |
| 649 | } |
| 650 | |
Edward Cree | c0795bf | 2016-05-24 18:53:36 +0100 | [diff] [blame] | 651 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) |
| 652 | { |
| 653 | } |
| 654 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 655 | #endif /* EFX_USE_PIO */ |
| 656 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 657 | static void efx_ef10_remove(struct efx_nic *efx) |
| 658 | { |
| 659 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 660 | int rc; |
| 661 | |
Shradha Shah | f1122a3 | 2015-05-20 11:09:46 +0100 | [diff] [blame] | 662 | #ifdef CONFIG_SFC_SRIOV |
| 663 | struct efx_ef10_nic_data *nic_data_pf; |
| 664 | struct pci_dev *pci_dev_pf; |
| 665 | struct efx_nic *efx_pf; |
| 666 | struct ef10_vf *vf; |
| 667 | |
| 668 | if (efx->pci_dev->is_virtfn) { |
| 669 | pci_dev_pf = efx->pci_dev->physfn; |
| 670 | if (pci_dev_pf) { |
| 671 | efx_pf = pci_get_drvdata(pci_dev_pf); |
| 672 | nic_data_pf = efx_pf->nic_data; |
| 673 | vf = nic_data_pf->vf + nic_data->vf_index; |
| 674 | vf->efx = NULL; |
| 675 | } else |
| 676 | netif_info(efx, drv, efx->net_dev, |
| 677 | "Could not get the PF id from VF\n"); |
| 678 | } |
| 679 | #endif |
| 680 | |
Ben Hutchings | 9aecda9 | 2013-12-05 21:28:42 +0000 | [diff] [blame] | 681 | efx_ptp_remove(efx); |
| 682 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 683 | efx_mcdi_mon_remove(efx); |
| 684 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 685 | efx_ef10_rx_free_indir_table(efx); |
| 686 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 687 | if (nic_data->wc_membase) |
| 688 | iounmap(nic_data->wc_membase); |
| 689 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 690 | rc = efx_ef10_free_vis(efx); |
| 691 | WARN_ON(rc != 0); |
| 692 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 693 | if (!nic_data->must_restore_piobufs) |
| 694 | efx_ef10_free_piobufs(efx); |
| 695 | |
Shradha Shah | 0f5c084 | 2015-06-02 11:37:58 +0100 | [diff] [blame] | 696 | device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); |
| 697 | device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); |
| 698 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 699 | efx_mcdi_fini(efx); |
| 700 | efx_nic_free_buffer(efx, &nic_data->mcdi_buf); |
| 701 | kfree(nic_data); |
| 702 | } |
| 703 | |
Shradha Shah | 88a37de | 2015-05-20 11:09:15 +0100 | [diff] [blame] | 704 | static int efx_ef10_probe_pf(struct efx_nic *efx) |
| 705 | { |
| 706 | return efx_ef10_probe(efx); |
| 707 | } |
| 708 | |
Daniel Pieczko | 7a186f4 | 2015-07-07 11:37:19 +0100 | [diff] [blame] | 709 | int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) |
| 710 | { |
| 711 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); |
| 712 | |
| 713 | MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); |
| 714 | return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), |
| 715 | NULL, 0, NULL); |
| 716 | } |
| 717 | |
| 718 | int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) |
| 719 | { |
| 720 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); |
| 721 | |
| 722 | MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); |
| 723 | return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), |
| 724 | NULL, 0, NULL); |
| 725 | } |
| 726 | |
| 727 | int efx_ef10_vport_add_mac(struct efx_nic *efx, |
| 728 | unsigned int port_id, u8 *mac) |
| 729 | { |
| 730 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); |
| 731 | |
| 732 | MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); |
| 733 | ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); |
| 734 | |
| 735 | return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, |
| 736 | sizeof(inbuf), NULL, 0, NULL); |
| 737 | } |
| 738 | |
| 739 | int efx_ef10_vport_del_mac(struct efx_nic *efx, |
| 740 | unsigned int port_id, u8 *mac) |
| 741 | { |
| 742 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); |
| 743 | |
| 744 | MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); |
| 745 | ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); |
| 746 | |
| 747 | return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, |
| 748 | sizeof(inbuf), NULL, 0, NULL); |
| 749 | } |
| 750 | |
Shradha Shah | 88a37de | 2015-05-20 11:09:15 +0100 | [diff] [blame] | 751 | #ifdef CONFIG_SFC_SRIOV |
| 752 | static int efx_ef10_probe_vf(struct efx_nic *efx) |
| 753 | { |
| 754 | int rc; |
Daniel Pieczko | 6598dad | 2015-06-02 11:41:00 +0100 | [diff] [blame] | 755 | struct pci_dev *pci_dev_pf; |
| 756 | |
| 757 | /* If the parent PF has no VF data structure, it doesn't know about this |
| 758 | * VF so fail probe. The VF needs to be re-created. This can happen |
| 759 | * if the PF driver is unloaded while the VF is assigned to a guest. |
| 760 | */ |
| 761 | pci_dev_pf = efx->pci_dev->physfn; |
| 762 | if (pci_dev_pf) { |
| 763 | struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); |
| 764 | struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; |
| 765 | |
| 766 | if (!nic_data_pf->vf) { |
| 767 | netif_info(efx, drv, efx->net_dev, |
| 768 | "The VF cannot link to its parent PF; " |
| 769 | "please destroy and re-create the VF\n"); |
| 770 | return -EBUSY; |
| 771 | } |
| 772 | } |
Shradha Shah | 88a37de | 2015-05-20 11:09:15 +0100 | [diff] [blame] | 773 | |
| 774 | rc = efx_ef10_probe(efx); |
| 775 | if (rc) |
| 776 | return rc; |
| 777 | |
| 778 | rc = efx_ef10_get_vf_index(efx); |
| 779 | if (rc) |
| 780 | goto fail; |
| 781 | |
Shradha Shah | f1122a3 | 2015-05-20 11:09:46 +0100 | [diff] [blame] | 782 | if (efx->pci_dev->is_virtfn) { |
| 783 | if (efx->pci_dev->physfn) { |
| 784 | struct efx_nic *efx_pf = |
| 785 | pci_get_drvdata(efx->pci_dev->physfn); |
| 786 | struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; |
| 787 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 788 | |
| 789 | nic_data_p->vf[nic_data->vf_index].efx = efx; |
Daniel Pieczko | 6598dad | 2015-06-02 11:41:00 +0100 | [diff] [blame] | 790 | nic_data_p->vf[nic_data->vf_index].pci_dev = |
| 791 | efx->pci_dev; |
Shradha Shah | f1122a3 | 2015-05-20 11:09:46 +0100 | [diff] [blame] | 792 | } else |
| 793 | netif_info(efx, drv, efx->net_dev, |
| 794 | "Could not get the PF id from VF\n"); |
| 795 | } |
| 796 | |
Shradha Shah | 88a37de | 2015-05-20 11:09:15 +0100 | [diff] [blame] | 797 | return 0; |
| 798 | |
| 799 | fail: |
| 800 | efx_ef10_remove(efx); |
| 801 | return rc; |
| 802 | } |
| 803 | #else |
| 804 | static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) |
| 805 | { |
| 806 | return 0; |
| 807 | } |
| 808 | #endif |
| 809 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 810 | static int efx_ef10_alloc_vis(struct efx_nic *efx, |
| 811 | unsigned int min_vis, unsigned int max_vis) |
| 812 | { |
| 813 | MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); |
| 814 | MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); |
| 815 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 816 | size_t outlen; |
| 817 | int rc; |
| 818 | |
| 819 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); |
| 820 | MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); |
| 821 | rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), |
| 822 | outbuf, sizeof(outbuf), &outlen); |
| 823 | if (rc != 0) |
| 824 | return rc; |
| 825 | |
| 826 | if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) |
| 827 | return -EIO; |
| 828 | |
| 829 | netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", |
| 830 | MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); |
| 831 | |
| 832 | nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); |
| 833 | nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); |
| 834 | return 0; |
| 835 | } |
| 836 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 837 | /* Note that the failure path of this function does not free |
| 838 | * resources, as this will be done by efx_ef10_remove(). |
| 839 | */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 840 | static int efx_ef10_dimension_resources(struct efx_nic *efx) |
| 841 | { |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 842 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 843 | unsigned int uc_mem_map_size, wc_mem_map_size; |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 844 | unsigned int min_vis = max(EFX_TXQ_TYPES, |
| 845 | efx_separate_tx_channels ? 2 : 1); |
| 846 | unsigned int channel_vis, pio_write_vi_base, max_vis; |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 847 | void __iomem *membase; |
| 848 | int rc; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 849 | |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 850 | channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 851 | |
| 852 | #ifdef EFX_USE_PIO |
| 853 | /* Try to allocate PIO buffers if wanted and if the full |
| 854 | * number of PIO buffers would be sufficient to allocate one |
| 855 | * copy-buffer per TX channel. Failure is non-fatal, as there |
| 856 | * are only a small number of PIO buffers shared between all |
| 857 | * functions of the controller. |
| 858 | */ |
| 859 | if (efx_piobuf_size != 0 && |
| 860 | ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= |
| 861 | efx->n_tx_channels) { |
| 862 | unsigned int n_piobufs = |
| 863 | DIV_ROUND_UP(efx->n_tx_channels, |
| 864 | ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size); |
| 865 | |
| 866 | rc = efx_ef10_alloc_piobufs(efx, n_piobufs); |
| 867 | if (rc) |
| 868 | netif_err(efx, probe, efx->net_dev, |
| 869 | "failed to allocate PIO buffers (%d)\n", rc); |
| 870 | else |
| 871 | netif_dbg(efx, probe, efx->net_dev, |
| 872 | "allocated %u PIO buffers\n", n_piobufs); |
| 873 | } |
| 874 | #else |
| 875 | nic_data->n_piobufs = 0; |
| 876 | #endif |
| 877 | |
| 878 | /* PIO buffers should be mapped with write-combining enabled, |
| 879 | * and we want to make single UC and WC mappings rather than |
| 880 | * several of each (in fact that's the only option if host |
| 881 | * page size is >4K). So we may allocate some extra VIs just |
| 882 | * for writing PIO buffers through. |
Daniel Pieczko | 52ad762 | 2014-04-01 13:10:34 +0100 | [diff] [blame] | 883 | * |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 884 | * The UC mapping contains (channel_vis - 1) complete VIs and the |
Daniel Pieczko | 52ad762 | 2014-04-01 13:10:34 +0100 | [diff] [blame] | 885 | * first half of the next VI. Then the WC mapping begins with |
| 886 | * the second half of this last VI. |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 887 | */ |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 888 | uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE + |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 889 | ER_DZ_TX_PIOBUF); |
| 890 | if (nic_data->n_piobufs) { |
Daniel Pieczko | 52ad762 | 2014-04-01 13:10:34 +0100 | [diff] [blame] | 891 | /* pio_write_vi_base rounds down to give the number of complete |
| 892 | * VIs inside the UC mapping. |
| 893 | */ |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 894 | pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; |
| 895 | wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + |
| 896 | nic_data->n_piobufs) * |
| 897 | EFX_VI_PAGE_SIZE) - |
| 898 | uc_mem_map_size); |
| 899 | max_vis = pio_write_vi_base + nic_data->n_piobufs; |
| 900 | } else { |
| 901 | pio_write_vi_base = 0; |
| 902 | wc_mem_map_size = 0; |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 903 | max_vis = channel_vis; |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 904 | } |
| 905 | |
| 906 | /* In case the last attached driver failed to free VIs, do it now */ |
| 907 | rc = efx_ef10_free_vis(efx); |
| 908 | if (rc != 0) |
| 909 | return rc; |
| 910 | |
| 911 | rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); |
| 912 | if (rc != 0) |
| 913 | return rc; |
| 914 | |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 915 | if (nic_data->n_allocated_vis < channel_vis) { |
| 916 | netif_info(efx, drv, efx->net_dev, |
| 917 | "Could not allocate enough VIs to satisfy RSS" |
| 918 | " requirements. Performance may not be optimal.\n"); |
| 919 | /* We didn't get the VIs to populate our channels. |
| 920 | * We could keep what we got but then we'd have more |
| 921 | * interrupts than we need. |
| 922 | * Instead calculate new max_channels and restart |
| 923 | */ |
| 924 | efx->max_channels = nic_data->n_allocated_vis; |
| 925 | efx->max_tx_channels = |
| 926 | nic_data->n_allocated_vis / EFX_TXQ_TYPES; |
| 927 | |
| 928 | efx_ef10_free_vis(efx); |
| 929 | return -EAGAIN; |
| 930 | } |
| 931 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 932 | /* If we didn't get enough VIs to map all the PIO buffers, free the |
| 933 | * PIO buffers |
| 934 | */ |
| 935 | if (nic_data->n_piobufs && |
| 936 | nic_data->n_allocated_vis < |
| 937 | pio_write_vi_base + nic_data->n_piobufs) { |
| 938 | netif_dbg(efx, probe, efx->net_dev, |
| 939 | "%u VIs are not sufficient to map %u PIO buffers\n", |
| 940 | nic_data->n_allocated_vis, nic_data->n_piobufs); |
| 941 | efx_ef10_free_piobufs(efx); |
| 942 | } |
| 943 | |
| 944 | /* Shrink the original UC mapping of the memory BAR */ |
| 945 | membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); |
| 946 | if (!membase) { |
| 947 | netif_err(efx, probe, efx->net_dev, |
| 948 | "could not shrink memory BAR to %x\n", |
| 949 | uc_mem_map_size); |
| 950 | return -ENOMEM; |
| 951 | } |
| 952 | iounmap(efx->membase); |
| 953 | efx->membase = membase; |
| 954 | |
| 955 | /* Set up the WC mapping if needed */ |
| 956 | if (wc_mem_map_size) { |
| 957 | nic_data->wc_membase = ioremap_wc(efx->membase_phys + |
| 958 | uc_mem_map_size, |
| 959 | wc_mem_map_size); |
| 960 | if (!nic_data->wc_membase) { |
| 961 | netif_err(efx, probe, efx->net_dev, |
| 962 | "could not allocate WC mapping of size %x\n", |
| 963 | wc_mem_map_size); |
| 964 | return -ENOMEM; |
| 965 | } |
| 966 | nic_data->pio_write_vi_base = pio_write_vi_base; |
| 967 | nic_data->pio_write_base = |
| 968 | nic_data->wc_membase + |
| 969 | (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - |
| 970 | uc_mem_map_size); |
| 971 | |
| 972 | rc = efx_ef10_link_piobufs(efx); |
| 973 | if (rc) |
| 974 | efx_ef10_free_piobufs(efx); |
| 975 | } |
| 976 | |
| 977 | netif_dbg(efx, probe, efx->net_dev, |
| 978 | "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", |
| 979 | &efx->membase_phys, efx->membase, uc_mem_map_size, |
| 980 | nic_data->wc_membase, wc_mem_map_size); |
| 981 | |
| 982 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 983 | } |
| 984 | |
| 985 | static int efx_ef10_init_nic(struct efx_nic *efx) |
| 986 | { |
| 987 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 988 | int rc; |
| 989 | |
Ben Hutchings | a915ccc | 2013-09-05 22:51:55 +0100 | [diff] [blame] | 990 | if (nic_data->must_check_datapath_caps) { |
| 991 | rc = efx_ef10_init_datapath_caps(efx); |
| 992 | if (rc) |
| 993 | return rc; |
| 994 | nic_data->must_check_datapath_caps = false; |
| 995 | } |
| 996 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 997 | if (nic_data->must_realloc_vis) { |
| 998 | /* We cannot let the number of VIs change now */ |
| 999 | rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, |
| 1000 | nic_data->n_allocated_vis); |
| 1001 | if (rc) |
| 1002 | return rc; |
| 1003 | nic_data->must_realloc_vis = false; |
| 1004 | } |
| 1005 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 1006 | if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { |
| 1007 | rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); |
| 1008 | if (rc == 0) { |
| 1009 | rc = efx_ef10_link_piobufs(efx); |
| 1010 | if (rc) |
| 1011 | efx_ef10_free_piobufs(efx); |
| 1012 | } |
| 1013 | |
| 1014 | /* Log an error on failure, but this is non-fatal */ |
| 1015 | if (rc) |
| 1016 | netif_err(efx, drv, efx->net_dev, |
| 1017 | "failed to restore PIO buffers (%d)\n", rc); |
| 1018 | nic_data->must_restore_piobufs = false; |
| 1019 | } |
| 1020 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1021 | /* don't fail init if RSS setup doesn't work */ |
| 1022 | efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); |
| 1023 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1024 | return 0; |
| 1025 | } |
| 1026 | |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1027 | static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) |
| 1028 | { |
| 1029 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Daniel Pieczko | 774ad03 | 2015-07-31 11:15:22 +0100 | [diff] [blame] | 1030 | #ifdef CONFIG_SFC_SRIOV |
| 1031 | unsigned int i; |
| 1032 | #endif |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1033 | |
| 1034 | /* All our allocations have been reset */ |
| 1035 | nic_data->must_realloc_vis = true; |
| 1036 | nic_data->must_restore_filters = true; |
| 1037 | nic_data->must_restore_piobufs = true; |
Edward Cree | c0795bf | 2016-05-24 18:53:36 +0100 | [diff] [blame] | 1038 | efx_ef10_forget_old_piobufs(efx); |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1039 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
Daniel Pieczko | 774ad03 | 2015-07-31 11:15:22 +0100 | [diff] [blame] | 1040 | |
| 1041 | /* Driver-created vswitches and vports must be re-created */ |
| 1042 | nic_data->must_probe_vswitching = true; |
| 1043 | nic_data->vport_id = EVB_PORT_ID_ASSIGNED; |
| 1044 | #ifdef CONFIG_SFC_SRIOV |
| 1045 | if (nic_data->vf) |
| 1046 | for (i = 0; i < efx->vf_count; i++) |
| 1047 | nic_data->vf[i].vport_id = 0; |
| 1048 | #endif |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1049 | } |
| 1050 | |
Jon Cooper | 087e902 | 2015-05-20 11:11:35 +0100 | [diff] [blame] | 1051 | static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) |
| 1052 | { |
| 1053 | if (reason == RESET_TYPE_MC_FAILURE) |
| 1054 | return RESET_TYPE_DATAPATH; |
| 1055 | |
| 1056 | return efx_mcdi_map_reset_reason(reason); |
| 1057 | } |
| 1058 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1059 | static int efx_ef10_map_reset_flags(u32 *flags) |
| 1060 | { |
| 1061 | enum { |
| 1062 | EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << |
| 1063 | ETH_RESET_SHARED_SHIFT), |
| 1064 | EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | |
| 1065 | ETH_RESET_OFFLOAD | ETH_RESET_MAC | |
| 1066 | ETH_RESET_PHY | ETH_RESET_MGMT) << |
| 1067 | ETH_RESET_SHARED_SHIFT) |
| 1068 | }; |
| 1069 | |
| 1070 | /* We assume for now that our PCI function is permitted to |
| 1071 | * reset everything. |
| 1072 | */ |
| 1073 | |
| 1074 | if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { |
| 1075 | *flags &= ~EF10_RESET_MC; |
| 1076 | return RESET_TYPE_WORLD; |
| 1077 | } |
| 1078 | |
| 1079 | if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { |
| 1080 | *flags &= ~EF10_RESET_PORT; |
| 1081 | return RESET_TYPE_ALL; |
| 1082 | } |
| 1083 | |
| 1084 | /* no invisible reset implemented */ |
| 1085 | |
| 1086 | return -EINVAL; |
| 1087 | } |
| 1088 | |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1089 | static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) |
| 1090 | { |
| 1091 | int rc = efx_mcdi_reset(efx, reset_type); |
| 1092 | |
Daniel Pieczko | 2732482 | 2015-07-31 11:14:54 +0100 | [diff] [blame] | 1093 | /* Unprivileged functions return -EPERM, but need to return success |
| 1094 | * here so that the datapath is brought back up. |
| 1095 | */ |
| 1096 | if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) |
| 1097 | rc = 0; |
| 1098 | |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1099 | /* If it was a port reset, trigger reallocation of MC resources. |
| 1100 | * Note that on an MC reset nothing needs to be done now because we'll |
| 1101 | * detect the MC reset later and handle it then. |
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 1102 | * For an FLR, we never get an MC reset event, but the MC has reset all |
| 1103 | * resources assigned to us, so we have to trigger reallocation now. |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1104 | */ |
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 1105 | if ((reset_type == RESET_TYPE_ALL || |
| 1106 | reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1107 | efx_ef10_reset_mc_allocations(efx); |
| 1108 | return rc; |
| 1109 | } |
| 1110 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1111 | #define EF10_DMA_STAT(ext_name, mcdi_name) \ |
| 1112 | [EF10_STAT_ ## ext_name] = \ |
| 1113 | { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } |
| 1114 | #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ |
| 1115 | [EF10_STAT_ ## int_name] = \ |
| 1116 | { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } |
| 1117 | #define EF10_OTHER_STAT(ext_name) \ |
| 1118 | [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1119 | #define GENERIC_SW_STAT(ext_name) \ |
| 1120 | [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1121 | |
| 1122 | static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1123 | EF10_DMA_STAT(port_tx_bytes, TX_BYTES), |
| 1124 | EF10_DMA_STAT(port_tx_packets, TX_PKTS), |
| 1125 | EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), |
| 1126 | EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), |
| 1127 | EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), |
| 1128 | EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), |
| 1129 | EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), |
| 1130 | EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), |
| 1131 | EF10_DMA_STAT(port_tx_64, TX_64_PKTS), |
| 1132 | EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), |
| 1133 | EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), |
| 1134 | EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), |
| 1135 | EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), |
| 1136 | EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), |
| 1137 | EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), |
| 1138 | EF10_DMA_STAT(port_rx_bytes, RX_BYTES), |
| 1139 | EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), |
| 1140 | EF10_OTHER_STAT(port_rx_good_bytes), |
| 1141 | EF10_OTHER_STAT(port_rx_bad_bytes), |
| 1142 | EF10_DMA_STAT(port_rx_packets, RX_PKTS), |
| 1143 | EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), |
| 1144 | EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), |
| 1145 | EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), |
| 1146 | EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), |
| 1147 | EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), |
| 1148 | EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), |
| 1149 | EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), |
| 1150 | EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), |
| 1151 | EF10_DMA_STAT(port_rx_64, RX_64_PKTS), |
| 1152 | EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), |
| 1153 | EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), |
| 1154 | EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), |
| 1155 | EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), |
| 1156 | EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), |
| 1157 | EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), |
| 1158 | EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), |
| 1159 | EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), |
| 1160 | EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), |
| 1161 | EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), |
| 1162 | EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), |
| 1163 | EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1164 | GENERIC_SW_STAT(rx_nodesc_trunc), |
| 1165 | GENERIC_SW_STAT(rx_noskb_drops), |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1166 | EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), |
| 1167 | EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), |
| 1168 | EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), |
| 1169 | EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), |
| 1170 | EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), |
| 1171 | EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), |
| 1172 | EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), |
| 1173 | EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), |
| 1174 | EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), |
| 1175 | EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), |
| 1176 | EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), |
| 1177 | EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1178 | EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), |
| 1179 | EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), |
| 1180 | EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), |
| 1181 | EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), |
| 1182 | EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), |
| 1183 | EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), |
| 1184 | EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), |
| 1185 | EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), |
| 1186 | EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), |
| 1187 | EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), |
| 1188 | EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), |
| 1189 | EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), |
| 1190 | EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), |
| 1191 | EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), |
| 1192 | EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), |
| 1193 | EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), |
| 1194 | EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), |
| 1195 | EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1196 | }; |
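/* The EF10_STAT_port_* entries above map onto the MAC's per-port counters,
 * while the VADAPTER_* entries are per-virtual-adapter counters;
 * efx_ef10_get_stat_mask() below only enables the vadaptor entries when the
 * firmware reports the EVB capability.
 */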
| 1197 | |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1198 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ |
| 1199 | (1ULL << EF10_STAT_port_tx_packets) | \ |
| 1200 | (1ULL << EF10_STAT_port_tx_pause) | \ |
| 1201 | (1ULL << EF10_STAT_port_tx_unicast) | \ |
| 1202 | (1ULL << EF10_STAT_port_tx_multicast) | \ |
| 1203 | (1ULL << EF10_STAT_port_tx_broadcast) | \ |
| 1204 | (1ULL << EF10_STAT_port_rx_bytes) | \ |
| 1205 | (1ULL << \ |
| 1206 | EF10_STAT_port_rx_bytes_minus_good_bytes) | \ |
| 1207 | (1ULL << EF10_STAT_port_rx_good_bytes) | \ |
| 1208 | (1ULL << EF10_STAT_port_rx_bad_bytes) | \ |
| 1209 | (1ULL << EF10_STAT_port_rx_packets) | \ |
| 1210 | (1ULL << EF10_STAT_port_rx_good) | \ |
| 1211 | (1ULL << EF10_STAT_port_rx_bad) | \ |
| 1212 | (1ULL << EF10_STAT_port_rx_pause) | \ |
| 1213 | (1ULL << EF10_STAT_port_rx_control) | \ |
| 1214 | (1ULL << EF10_STAT_port_rx_unicast) | \ |
| 1215 | (1ULL << EF10_STAT_port_rx_multicast) | \ |
| 1216 | (1ULL << EF10_STAT_port_rx_broadcast) | \ |
| 1217 | (1ULL << EF10_STAT_port_rx_lt64) | \ |
| 1218 | (1ULL << EF10_STAT_port_rx_64) | \ |
| 1219 | (1ULL << EF10_STAT_port_rx_65_to_127) | \ |
| 1220 | (1ULL << EF10_STAT_port_rx_128_to_255) | \ |
| 1221 | (1ULL << EF10_STAT_port_rx_256_to_511) | \ |
| 1222 | (1ULL << EF10_STAT_port_rx_512_to_1023) |\ |
| 1223 | (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ |
| 1224 | (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ |
| 1225 | (1ULL << EF10_STAT_port_rx_gtjumbo) | \ |
| 1226 | (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ |
| 1227 | (1ULL << EF10_STAT_port_rx_overflow) | \ |
| 1228 | (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1229 | (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ |
| 1230 | (1ULL << GENERIC_STAT_rx_noskb_drops)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1231 | |
| 1232 | /* These statistics are only provided by the 10G MAC. For a 10G/40G |
| 1233 | * switchable port we do not expose these because they might not |
| 1234 | * include all the packets they should. |
| 1235 | */ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1236 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ |
| 1237 | (1ULL << EF10_STAT_port_tx_lt64) | \ |
| 1238 | (1ULL << EF10_STAT_port_tx_64) | \ |
| 1239 | (1ULL << EF10_STAT_port_tx_65_to_127) |\ |
| 1240 | (1ULL << EF10_STAT_port_tx_128_to_255) |\ |
| 1241 | (1ULL << EF10_STAT_port_tx_256_to_511) |\ |
| 1242 | (1ULL << EF10_STAT_port_tx_512_to_1023) |\ |
| 1243 | (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ |
| 1244 | (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1245 | |
| 1246 | /* These statistics are only provided by the 40G MAC. For a 10G/40G |
| 1247 | * switchable port we do expose these because the errors will otherwise |
| 1248 | * be silent. |
| 1249 | */ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1250 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ |
| 1251 | (1ULL << EF10_STAT_port_rx_length_error)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1252 | |
Edward Cree | 568d7a0 | 2013-09-25 17:32:09 +0100 | [diff] [blame] | 1253 | /* These statistics are only provided if the firmware supports the |
| 1254 | * capability PM_AND_RXDP_COUNTERS. |
| 1255 | */ |
| 1256 | #define HUNT_PM_AND_RXDP_STAT_MASK ( \ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1257 | (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ |
| 1258 | (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ |
| 1259 | (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ |
| 1260 | (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ |
| 1261 | (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ |
| 1262 | (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ |
| 1263 | (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ |
| 1264 | (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ |
| 1265 | (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ |
| 1266 | (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ |
| 1267 | (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ |
| 1268 | (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1269 | |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1270 | static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1271 | { |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1272 | u64 raw_mask = HUNT_COMMON_STAT_MASK; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1273 | u32 port_caps = efx_mcdi_phy_get_caps(efx); |
Edward Cree | 568d7a0 | 2013-09-25 17:32:09 +0100 | [diff] [blame] | 1274 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1275 | |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1276 | if (!(efx->mcdi->fn_flags & |
| 1277 | 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) |
| 1278 | return 0; |
| 1279 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1280 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1281 | raw_mask |= HUNT_40G_EXTRA_STAT_MASK; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1282 | else |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1283 | raw_mask |= HUNT_10G_ONLY_STAT_MASK; |
Edward Cree | 568d7a0 | 2013-09-25 17:32:09 +0100 | [diff] [blame] | 1284 | |
| 1285 | if (nic_data->datapath_caps & |
| 1286 | (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) |
| 1287 | raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; |
| 1288 | |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1289 | return raw_mask; |
| 1290 | } |
| 1291 | |
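/* Build the bitmap of statistics that the firmware can supply on this
 * function.  The two 64-bit raw masks are repacked into the unsigned-long
 * bitmap expected by efx_nic_describe_stats()/efx_nic_update_stats(); on
 * 32-bit builds this means splitting each raw mask across two words.
 */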
| 1292 | static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) |
| 1293 | { |
Daniel Pieczko | d94619c | 2015-06-02 11:40:05 +0100 | [diff] [blame] | 1294 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1295 | u64 raw_mask[2]; |
| 1296 | |
| 1297 | raw_mask[0] = efx_ef10_raw_stat_mask(efx); |
| 1298 | |
Daniel Pieczko | d94619c | 2015-06-02 11:40:05 +0100 | [diff] [blame] | 1299 | /* Only show vadaptor stats when EVB capability is present */ |
| 1300 | if (nic_data->datapath_caps & |
| 1301 | (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { |
| 1302 | raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); |
| 1303 | raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; |
| 1304 | } else { |
| 1305 | raw_mask[1] = 0; |
| 1306 | } |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1307 | |
| 1308 | #if BITS_PER_LONG == 64 |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1309 | mask[0] = raw_mask[0]; |
| 1310 | mask[1] = raw_mask[1]; |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1311 | #else |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1312 | mask[0] = raw_mask[0] & 0xffffffff; |
| 1313 | mask[1] = raw_mask[0] >> 32; |
| 1314 | mask[2] = raw_mask[1] & 0xffffffff; |
| 1315 | mask[3] = raw_mask[1] >> 32; |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1316 | #endif |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1317 | } |
| 1318 | |
| 1319 | static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) |
| 1320 | { |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1321 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| 1322 | |
| 1323 | efx_ef10_get_stat_mask(efx, mask); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1324 | return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1325 | mask, names); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1326 | } |
| 1327 | |
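/* Copy the masked statistics into the ethtool array (if requested) and fill
 * in the core netdev stats.  When the EVB capability is present the core
 * stats are derived from the vadaptor counters, otherwise from the port
 * counters.
 */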
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1328 | static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, |
| 1329 | struct rtnl_link_stats64 *core_stats) |
| 1330 | { |
| 1331 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| 1332 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1333 | u64 *stats = nic_data->stats; |
| 1334 | size_t stats_count = 0, index; |
| 1335 | |
| 1336 | efx_ef10_get_stat_mask(efx, mask); |
| 1337 | |
| 1338 | if (full_stats) { |
| 1339 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { |
| 1340 | if (efx_ef10_stat_desc[index].name) { |
| 1341 | *full_stats++ = stats[index]; |
| 1342 | ++stats_count; |
| 1343 | } |
| 1344 | } |
| 1345 | } |
| 1346 | |
Bert Kenward | fbe4307 | 2015-08-26 16:39:03 +0100 | [diff] [blame] | 1347 | if (!core_stats) |
| 1348 | return stats_count; |
| 1349 | |
| 1350 | if (nic_data->datapath_caps & |
| 1351 | 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { |
| 1352 | /* Use vadaptor stats. */ |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1353 | core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + |
| 1354 | stats[EF10_STAT_rx_multicast] + |
| 1355 | stats[EF10_STAT_rx_broadcast]; |
| 1356 | core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + |
| 1357 | stats[EF10_STAT_tx_multicast] + |
| 1358 | stats[EF10_STAT_tx_broadcast]; |
| 1359 | core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + |
| 1360 | stats[EF10_STAT_rx_multicast_bytes] + |
| 1361 | stats[EF10_STAT_rx_broadcast_bytes]; |
| 1362 | core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + |
| 1363 | stats[EF10_STAT_tx_multicast_bytes] + |
| 1364 | stats[EF10_STAT_tx_broadcast_bytes]; |
| 1365 | core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1366 | stats[GENERIC_STAT_rx_noskb_drops]; |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1367 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; |
| 1368 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; |
| 1369 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; |
| 1370 | core_stats->rx_errors = core_stats->rx_crc_errors; |
| 1371 | core_stats->tx_errors = stats[EF10_STAT_tx_bad]; |
Bert Kenward | fbe4307 | 2015-08-26 16:39:03 +0100 | [diff] [blame] | 1372 | } else { |
| 1373 | /* Use port stats. */ |
| 1374 | core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; |
| 1375 | core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; |
| 1376 | core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; |
| 1377 | core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; |
| 1378 | core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + |
| 1379 | stats[GENERIC_STAT_rx_nodesc_trunc] + |
| 1380 | stats[GENERIC_STAT_rx_noskb_drops]; |
| 1381 | core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; |
| 1382 | core_stats->rx_length_errors = |
| 1383 | stats[EF10_STAT_port_rx_gtjumbo] + |
| 1384 | stats[EF10_STAT_port_rx_length_error]; |
| 1385 | core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; |
| 1386 | core_stats->rx_frame_errors = |
| 1387 | stats[EF10_STAT_port_rx_align_error]; |
| 1388 | core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; |
| 1389 | core_stats->rx_errors = (core_stats->rx_length_errors + |
| 1390 | core_stats->rx_crc_errors + |
| 1391 | core_stats->rx_frame_errors); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1392 | } |
| 1393 | |
| 1394 | return stats_count; |
| 1395 | } |
| 1396 | |
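/* Refresh the PF statistics from the buffer that firmware DMAs MAC stats
 * into.  The generation words bracket the stats block: if GENERATION_START
 * read after the copy differs from the GENERATION_END read before it, the
 * firmware updated the buffer while we were copying, so return -EAGAIN and
 * let the caller retry.
 */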
| 1397 | static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1398 | { |
| 1399 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1400 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1401 | __le64 generation_start, generation_end; |
| 1402 | u64 *stats = nic_data->stats; |
| 1403 | __le64 *dma_stats; |
| 1404 | |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1405 | efx_ef10_get_stat_mask(efx, mask); |
| 1406 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1407 | dma_stats = efx->stats_buffer.addr; |
| 1408 | nic_data = efx->nic_data; |
| 1409 | |
| 1410 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
| 1411 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) |
| 1412 | return 0; |
| 1413 | rmb(); |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1414 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1415 | stats, efx->stats_buffer.addr, false); |
Jon Cooper | d546a89 | 2013-09-27 18:26:30 +0100 | [diff] [blame] | 1416 | rmb(); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1417 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
| 1418 | if (generation_end != generation_start) |
| 1419 | return -EAGAIN; |
| 1420 | |
| 1421 | /* Update derived statistics */ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1422 | efx_nic_fix_nodesc_drop_stat(efx, |
| 1423 | &stats[EF10_STAT_port_rx_nodesc_drops]); |
| 1424 | stats[EF10_STAT_port_rx_good_bytes] = |
| 1425 | stats[EF10_STAT_port_rx_bytes] - |
| 1426 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; |
| 1427 | efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], |
| 1428 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1429 | efx_update_sw_stats(efx, stats); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1430 | return 0; |
| 1431 | } |
| 1432 | |
| 1433 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1434 | static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, |
| 1435 | struct rtnl_link_stats64 *core_stats) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1436 | { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1437 | int retry; |
| 1438 | |
| 1439 | /* If we're unlucky enough to read statistics during the DMA, wait |
| 1440 | * up to 10ms for it to finish (typically takes <500us) |
| 1441 | */ |
| 1442 | for (retry = 0; retry < 100; ++retry) { |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1443 | if (efx_ef10_try_update_nic_stats_pf(efx) == 0) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1444 | break; |
| 1445 | udelay(100); |
| 1446 | } |
| 1447 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1448 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
| 1449 | } |
| 1450 | |
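/* VF statistics are fetched on demand: allocate a temporary buffer, ask the
 * MC to DMA a single snapshot of MAC stats into it (MC_CMD_MAC_STATS with
 * the DMA flag set), and apply the same generation-count check as the PF
 * path.  efx->stats_lock is dropped around the blocking MCDI call.
 */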
| 1451 | static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) |
| 1452 | { |
| 1453 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); |
| 1454 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1455 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| 1456 | __le64 generation_start, generation_end; |
| 1457 | u64 *stats = nic_data->stats; |
| 1458 | u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); |
| 1459 | struct efx_buffer stats_buf; |
| 1460 | __le64 *dma_stats; |
| 1461 | int rc; |
| 1462 | |
Daniel Pieczko | f00bf23 | 2015-06-02 11:40:18 +0100 | [diff] [blame] | 1463 | spin_unlock_bh(&efx->stats_lock); |
| 1464 | |
| 1465 | if (in_interrupt()) { |
| 1466 | /* If in atomic context, cannot update stats. Just update the |
| 1467 | * software stats and return so the caller can continue. |
| 1468 | */ |
| 1469 | spin_lock_bh(&efx->stats_lock); |
| 1470 | efx_update_sw_stats(efx, stats); |
| 1471 | return 0; |
| 1472 | } |
| 1473 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1474 | efx_ef10_get_stat_mask(efx, mask); |
| 1475 | |
| 1476 | rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); |
Daniel Pieczko | f00bf23 | 2015-06-02 11:40:18 +0100 | [diff] [blame] | 1477 | if (rc) { |
| 1478 | spin_lock_bh(&efx->stats_lock); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1479 | return rc; |
Daniel Pieczko | f00bf23 | 2015-06-02 11:40:18 +0100 | [diff] [blame] | 1480 | } |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1481 | |
| 1482 | dma_stats = stats_buf.addr; |
| 1483 | dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; |
| 1484 | |
| 1485 | MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); |
| 1486 | MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1487 | MAC_STATS_IN_DMA, 1); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1488 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); |
| 1489 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); |
| 1490 | |
Daniel Pieczko | 6dd4859 | 2015-06-02 11:39:49 +0100 | [diff] [blame] | 1491 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), |
| 1492 | NULL, 0, NULL); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1493 | spin_lock_bh(&efx->stats_lock); |
Daniel Pieczko | 6dd4859 | 2015-06-02 11:39:49 +0100 | [diff] [blame] | 1494 | if (rc) { |
| 1495 | /* Expect ENOENT if DMA queues have not been set up */ |
| 1496 | if (rc != -ENOENT || atomic_read(&efx->active_queues)) |
| 1497 | efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, |
| 1498 | sizeof(inbuf), NULL, 0, rc); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1499 | goto out; |
Daniel Pieczko | 6dd4859 | 2015-06-02 11:39:49 +0100 | [diff] [blame] | 1500 | } |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1501 | |
| 1502 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1503 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { |
| 1504 | WARN_ON_ONCE(1); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1505 | goto out; |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1506 | } |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1507 | rmb(); |
| 1508 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, |
| 1509 | stats, stats_buf.addr, false); |
| 1510 | rmb(); |
| 1511 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
| 1512 | if (generation_end != generation_start) { |
| 1513 | rc = -EAGAIN; |
| 1514 | goto out; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1515 | } |
| 1516 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1517 | efx_update_sw_stats(efx, stats); |
| 1518 | out: |
| 1519 | efx_nic_free_buffer(efx, &stats_buf); |
| 1520 | return rc; |
| 1521 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1522 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1523 | static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, |
| 1524 | struct rtnl_link_stats64 *core_stats) |
| 1525 | { |
| 1526 | if (efx_ef10_try_update_nic_stats_vf(efx)) |
| 1527 | return 0; |
| 1528 | |
| 1529 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1530 | } |
| 1531 | |
| 1532 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) |
| 1533 | { |
| 1534 | struct efx_nic *efx = channel->efx; |
| 1535 | unsigned int mode, value; |
| 1536 | efx_dword_t timer_cmd; |
| 1537 | |
| 1538 | if (channel->irq_moderation) { |
| 1539 | mode = 3; |
| 1540 | value = channel->irq_moderation - 1; |
| 1541 | } else { |
| 1542 | mode = 0; |
| 1543 | value = 0; |
| 1544 | } |
| 1545 | |
| 1546 | if (EFX_EF10_WORKAROUND_35388(efx)) { |
| 1547 | EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, |
| 1548 | EFE_DD_EVQ_IND_TIMER_FLAGS, |
| 1549 | ERF_DD_EVQ_IND_TIMER_MODE, mode, |
| 1550 | ERF_DD_EVQ_IND_TIMER_VAL, value); |
| 1551 | efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, |
| 1552 | channel->channel); |
| 1553 | } else { |
| 1554 | EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, |
| 1555 | ERF_DZ_TC_TIMER_VAL, value); |
| 1556 | efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, |
| 1557 | channel->channel); |
| 1558 | } |
| 1559 | } |
| 1560 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 1561 | static void efx_ef10_get_wol_vf(struct efx_nic *efx, |
| 1562 | struct ethtool_wolinfo *wol) {} |
| 1563 | |
| 1564 | static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) |
| 1565 | { |
| 1566 | return -EOPNOTSUPP; |
| 1567 | } |
| 1568 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1569 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) |
| 1570 | { |
| 1571 | wol->supported = 0; |
| 1572 | wol->wolopts = 0; |
| 1573 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
| 1574 | } |
| 1575 | |
| 1576 | static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) |
| 1577 | { |
| 1578 | if (type != 0) |
| 1579 | return -EINVAL; |
| 1580 | return 0; |
| 1581 | } |
| 1582 | |
| 1583 | static void efx_ef10_mcdi_request(struct efx_nic *efx, |
| 1584 | const efx_dword_t *hdr, size_t hdr_len, |
| 1585 | const efx_dword_t *sdu, size_t sdu_len) |
| 1586 | { |
| 1587 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1588 | u8 *pdu = nic_data->mcdi_buf.addr; |
| 1589 | |
| 1590 | memcpy(pdu, hdr, hdr_len); |
| 1591 | memcpy(pdu + hdr_len, sdu, sdu_len); |
| 1592 | wmb(); |
| 1593 | |
| 1594 | /* The hardware provides 'low' and 'high' (doorbell) registers |
| 1595 | * for passing the 64-bit address of an MCDI request to |
| 1596 | * firmware. However, the dwords are swapped by firmware. The 
| 1597 | * least significant bits of the doorbell are then 0 for all |
| 1598 | * MCDI requests due to alignment. |
| 1599 | */ |
| 1600 | _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), |
| 1601 | ER_DZ_MC_DB_LWRD); |
| 1602 | _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), |
| 1603 | ER_DZ_MC_DB_HWRD); |
| 1604 | } |
| 1605 | |
| 1606 | static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) |
| 1607 | { |
| 1608 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1609 | const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; |
| 1610 | |
| 1611 | rmb(); |
| 1612 | return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); |
| 1613 | } |
| 1614 | |
| 1615 | static void |
| 1616 | efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, |
| 1617 | size_t offset, size_t outlen) |
| 1618 | { |
| 1619 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1620 | const u8 *pdu = nic_data->mcdi_buf.addr; |
| 1621 | |
| 1622 | memcpy(outbuf, pdu + offset, outlen); |
| 1623 | } |
| 1624 | |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 1625 | static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) |
| 1626 | { |
| 1627 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1628 | |
| 1629 | /* All our allocations have been reset */ |
| 1630 | efx_ef10_reset_mc_allocations(efx); |
| 1631 | |
| 1632 | /* The datapath firmware might have been changed */ |
| 1633 | nic_data->must_check_datapath_caps = true; |
| 1634 | |
| 1635 | /* MAC statistics have been cleared on the NIC; clear the local |
| 1636 | * statistic that we update with efx_update_diff_stat(). |
| 1637 | */ |
| 1638 | nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; |
| 1639 | } |
| 1640 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1641 | static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) |
| 1642 | { |
| 1643 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1644 | int rc; |
| 1645 | |
| 1646 | rc = efx_ef10_get_warm_boot_count(efx); |
| 1647 | if (rc < 0) { |
| 1648 | /* The firmware is presumably in the process of |
| 1649 | * rebooting. However, we are supposed to report each |
| 1650 | * reboot just once, so we must only do that once we |
| 1651 | * can read and store the updated warm boot count. |
| 1652 | */ |
| 1653 | return 0; |
| 1654 | } |
| 1655 | |
| 1656 | if (rc == nic_data->warm_boot_count) |
| 1657 | return 0; |
| 1658 | |
| 1659 | nic_data->warm_boot_count = rc; |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 1660 | efx_ef10_mcdi_reboot_detected(efx); |
Ben Hutchings | 869070c | 2013-09-05 22:46:10 +0100 | [diff] [blame] | 1661 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1662 | return -EIO; |
| 1663 | } |
| 1664 | |
| 1665 | /* Handle an MSI interrupt |
| 1666 | * |
| 1667 | * Handle an MSI hardware interrupt. This routine schedules event |
| 1668 | * queue processing. No interrupt acknowledgement cycle is necessary. |
| 1669 | * Also, we never need to check that the interrupt is for us, since |
| 1670 | * MSI interrupts cannot be shared. |
| 1671 | */ |
| 1672 | static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) |
| 1673 | { |
| 1674 | struct efx_msi_context *context = dev_id; |
| 1675 | struct efx_nic *efx = context->efx; |
| 1676 | |
| 1677 | netif_vdbg(efx, intr, efx->net_dev, |
| 1678 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); |
| 1679 | |
| 1680 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { |
| 1681 | /* Note test interrupts */ |
| 1682 | if (context->index == efx->irq_level) |
| 1683 | efx->last_irq_cpu = raw_smp_processor_id(); |
| 1684 | |
| 1685 | /* Schedule processing of the channel */ |
| 1686 | efx_schedule_channel_irq(efx->channel[context->index]); |
| 1687 | } |
| 1688 | |
| 1689 | return IRQ_HANDLED; |
| 1690 | } |
| 1691 | |
| 1692 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) |
| 1693 | { |
| 1694 | struct efx_nic *efx = dev_id; |
| 1695 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); |
| 1696 | struct efx_channel *channel; |
| 1697 | efx_dword_t reg; |
| 1698 | u32 queues; |
| 1699 | |
| 1700 | /* Read the ISR which also ACKs the interrupts */ |
| 1701 | efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); |
| 1702 | queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); |
| 1703 | |
| 1704 | if (queues == 0) |
| 1705 | return IRQ_NONE; |
| 1706 | |
| 1707 | if (likely(soft_enabled)) { |
| 1708 | /* Note test interrupts */ |
| 1709 | if (queues & (1U << efx->irq_level)) |
| 1710 | efx->last_irq_cpu = raw_smp_processor_id(); |
| 1711 | |
| 1712 | efx_for_each_channel(channel, efx) { |
| 1713 | if (queues & 1) |
| 1714 | efx_schedule_channel_irq(channel); |
| 1715 | queues >>= 1; |
| 1716 | } |
| 1717 | } |
| 1718 | |
| 1719 | netif_vdbg(efx, intr, efx->net_dev, |
| 1720 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", |
| 1721 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); |
| 1722 | |
| 1723 | return IRQ_HANDLED; |
| 1724 | } |
| 1725 | |
| 1726 | static void efx_ef10_irq_test_generate(struct efx_nic *efx) |
| 1727 | { |
| 1728 | MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); |
| 1729 | |
| 1730 | BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); |
| 1731 | |
| 1732 | MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); |
| 1733 | (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, |
| 1734 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 1735 | } |
| 1736 | |
| 1737 | static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) |
| 1738 | { |
| 1739 | return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, |
| 1740 | (tx_queue->ptr_mask + 1) * |
| 1741 | sizeof(efx_qword_t), |
| 1742 | GFP_KERNEL); |
| 1743 | } |
| 1744 | |
| 1745 | /* This writes to the TX_DESC_WPTR and also pushes data */ |
| 1746 | static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, |
| 1747 | const efx_qword_t *txd) |
| 1748 | { |
| 1749 | unsigned int write_ptr; |
| 1750 | efx_oword_t reg; |
| 1751 | |
| 1752 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| 1753 | EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); |
| 1754 | reg.qword[0] = *txd; |
| 1755 | efx_writeo_page(tx_queue->efx, ®, |
| 1756 | ER_DZ_TX_DESC_UPD, tx_queue->queue); |
| 1757 | } |
| 1758 | |
| 1759 | static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) |
| 1760 | { |
| 1761 | MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
| 1762 | EFX_BUF_SIZE)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1763 | bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; |
| 1764 | size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; |
| 1765 | struct efx_channel *channel = tx_queue->channel; |
| 1766 | struct efx_nic *efx = tx_queue->efx; |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 1767 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 1768 | size_t inlen; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1769 | dma_addr_t dma_addr; |
| 1770 | efx_qword_t *txd; |
| 1771 | int rc; |
| 1772 | int i; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 1773 | BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1774 | |
| 1775 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); |
| 1776 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); |
| 1777 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); |
| 1778 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); |
| 1779 | MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS, |
| 1780 | INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, |
| 1781 | INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); |
| 1782 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 1783 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1784 | |
| 1785 | dma_addr = tx_queue->txd.buf.dma_addr; |
| 1786 | |
| 1787 | netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", |
| 1788 | tx_queue->queue, entries, (u64)dma_addr); |
| 1789 | |
| 1790 | for (i = 0; i < entries; ++i) { |
| 1791 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); |
| 1792 | dma_addr += EFX_BUF_SIZE; |
| 1793 | } |
| 1794 | |
| 1795 | inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); |
| 1796 | |
| 1797 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen, |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 1798 | NULL, 0, NULL); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1799 | if (rc) |
| 1800 | goto fail; |
| 1801 | |
| 1802 | /* A previous user of this TX queue might have left us a time 
| 1803 | * bomb by writing a descriptor to the TX push collector but 
| 1804 | * not the doorbell. (Each collector belongs to a port, not a 
| 1805 | * queue or function, so it cannot easily be reset.) We must 
| 1806 | * attempt to push a no-op descriptor in its place. 
| 1807 | */ 
| 1808 | tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; |
| 1809 | tx_queue->insert_count = 1; |
| 1810 | txd = efx_tx_desc(tx_queue, 0); |
| 1811 | EFX_POPULATE_QWORD_4(*txd, |
| 1812 | ESF_DZ_TX_DESC_IS_OPT, true, |
| 1813 | ESF_DZ_TX_OPTION_TYPE, |
| 1814 | ESE_DZ_TX_OPTION_DESC_CRC_CSUM, |
| 1815 | ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, |
| 1816 | ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); |
| 1817 | tx_queue->write_count = 1; |
Bert Kenward | 93171b1 | 2015-11-30 09:05:35 +0000 | [diff] [blame] | 1818 | |
| 1819 | if (nic_data->datapath_caps & |
| 1820 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { |
| 1821 | tx_queue->tso_version = 1; |
| 1822 | } |
| 1823 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1824 | wmb(); |
| 1825 | efx_ef10_push_tx_desc(tx_queue, txd); |
| 1826 | |
| 1827 | return; |
| 1828 | |
| 1829 | fail: |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 1830 | netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", |
| 1831 | tx_queue->queue); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1832 | } |
| 1833 | |
| 1834 | static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) |
| 1835 | { |
| 1836 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 1837 | MCDI_DECLARE_BUF_ERR(outbuf); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1838 | struct efx_nic *efx = tx_queue->efx; |
| 1839 | size_t outlen; |
| 1840 | int rc; |
| 1841 | |
| 1842 | MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, |
| 1843 | tx_queue->queue); |
| 1844 | |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 1845 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1846 | outbuf, sizeof(outbuf), &outlen); |
| 1847 | |
| 1848 | if (rc && rc != -EALREADY) |
| 1849 | goto fail; |
| 1850 | |
| 1851 | return; |
| 1852 | |
| 1853 | fail: |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 1854 | efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, |
| 1855 | outbuf, outlen, rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1856 | } |
| 1857 | |
| 1858 | static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) |
| 1859 | { |
| 1860 | efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); |
| 1861 | } |
| 1862 | |
| 1863 | /* This writes to the TX_DESC_WPTR (write pointer for the TX descriptor ring) */ 
| 1864 | static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) |
| 1865 | { |
| 1866 | unsigned int write_ptr; |
| 1867 | efx_dword_t reg; |
| 1868 | |
| 1869 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| 1870 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); |
| 1871 | efx_writed_page(tx_queue->efx, ®, |
| 1872 | ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); |
| 1873 | } |
| 1874 | |
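/* Write out any descriptors added since the last call.  If
 * efx_nic_may_push_tx_desc() allows it, the first new descriptor is pushed
 * together with the doorbell write; otherwise only the write pointer is
 * notified to the hardware.
 */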
| 1875 | static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) |
| 1876 | { |
| 1877 | unsigned int old_write_count = tx_queue->write_count; |
| 1878 | struct efx_tx_buffer *buffer; |
| 1879 | unsigned int write_ptr; |
| 1880 | efx_qword_t *txd; |
| 1881 | |
Martin Habets | b2663a4 | 2015-11-02 12:51:31 +0000 | [diff] [blame] | 1882 | tx_queue->xmit_more_available = false; |
| 1883 | if (unlikely(tx_queue->write_count == tx_queue->insert_count)) |
| 1884 | return; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1885 | |
| 1886 | do { |
| 1887 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| 1888 | buffer = &tx_queue->buffer[write_ptr]; |
| 1889 | txd = efx_tx_desc(tx_queue, write_ptr); |
| 1890 | ++tx_queue->write_count; |
| 1891 | |
| 1892 | /* Create TX descriptor ring entry */ |
| 1893 | if (buffer->flags & EFX_TX_BUF_OPTION) { |
| 1894 | *txd = buffer->option; |
| 1895 | } else { |
| 1896 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); |
| 1897 | EFX_POPULATE_QWORD_3( |
| 1898 | *txd, |
| 1899 | ESF_DZ_TX_KER_CONT, |
| 1900 | buffer->flags & EFX_TX_BUF_CONT, |
| 1901 | ESF_DZ_TX_KER_BYTE_CNT, buffer->len, |
| 1902 | ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); |
| 1903 | } |
| 1904 | } while (tx_queue->write_count != tx_queue->insert_count); |
| 1905 | |
| 1906 | wmb(); /* Ensure descriptors are written before they are fetched */ |
| 1907 | |
| 1908 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { |
| 1909 | txd = efx_tx_desc(tx_queue, |
| 1910 | old_write_count & tx_queue->ptr_mask); |
| 1911 | efx_ef10_push_tx_desc(tx_queue, txd); |
| 1912 | ++tx_queue->pushes; |
| 1913 | } else { |
| 1914 | efx_ef10_notify_tx_desc(tx_queue); |
| 1915 | } |
| 1916 | } |
| 1917 | |
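/* Allocate an RSS context.  An exclusive context spreads over
 * efx->rss_spread queues; a shared context is limited to a power-of-two
 * spread of at most EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE, and with a spread
 * of 1 no shared context is needed at all (EFX_EF10_RSS_CONTEXT_INVALID is
 * returned instead).
 */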
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1918 | static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, |
| 1919 | bool exclusive, unsigned *context_size) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1920 | { |
| 1921 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); |
| 1922 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 1923 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1924 | size_t outlen; |
| 1925 | int rc; |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1926 | u32 alloc_type = exclusive ? |
| 1927 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : |
| 1928 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; |
| 1929 | unsigned rss_spread = exclusive ? |
| 1930 | efx->rss_spread : |
| 1931 | min(rounddown_pow_of_two(efx->rss_spread), |
| 1932 | EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); |
| 1933 | |
| 1934 | if (!exclusive && rss_spread == 1) { |
| 1935 | *context = EFX_EF10_RSS_CONTEXT_INVALID; |
| 1936 | if (context_size) |
| 1937 | *context_size = 1; |
| 1938 | return 0; |
| 1939 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1940 | |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 1941 | if (nic_data->datapath_caps & |
| 1942 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) |
| 1943 | return -EOPNOTSUPP; |
| 1944 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1945 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 1946 | nic_data->vport_id); |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1947 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); |
| 1948 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1949 | |
| 1950 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), |
| 1951 | outbuf, sizeof(outbuf), &outlen); |
| 1952 | if (rc != 0) |
| 1953 | return rc; |
| 1954 | |
| 1955 | if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) |
| 1956 | return -EIO; |
| 1957 | |
| 1958 | *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); |
| 1959 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1960 | if (context_size) |
| 1961 | *context_size = rss_spread; |
| 1962 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1963 | return 0; |
| 1964 | } |
| 1965 | |
| 1966 | static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) |
| 1967 | { |
| 1968 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); |
| 1969 | int rc; |
| 1970 | |
| 1971 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, |
| 1972 | context); |
| 1973 | |
| 1974 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), |
| 1975 | NULL, 0, NULL); |
| 1976 | WARN_ON(rc != 0); |
| 1977 | } |
| 1978 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1979 | static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, |
| 1980 | const u32 *rx_indir_table) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1981 | { |
| 1982 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); |
| 1983 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); |
| 1984 | int i, rc; |
| 1985 | |
| 1986 | MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, |
| 1987 | context); |
| 1988 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
| 1989 | MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); |
| 1990 | |
| 1991 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) |
| 1992 | MCDI_PTR(tablebuf, |
| 1993 | RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1994 | (u8) rx_indir_table[i]; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1995 | |
| 1996 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, |
| 1997 | sizeof(tablebuf), NULL, 0, NULL); |
| 1998 | if (rc != 0) |
| 1999 | return rc; |
| 2000 | |
| 2001 | MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, |
| 2002 | context); |
| 2003 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != |
| 2004 | MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); |
| 2005 | for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) |
| 2006 | MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = |
| 2007 | efx->rx_hash_key[i]; |
| 2008 | |
| 2009 | return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, |
| 2010 | sizeof(keybuf), NULL, 0, NULL); |
| 2011 | } |
| 2012 | |
| 2013 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) |
| 2014 | { |
| 2015 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2016 | |
| 2017 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) |
| 2018 | efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); |
| 2019 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| 2020 | } |
| 2021 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2022 | static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, |
| 2023 | unsigned *context_size) |
| 2024 | { |
| 2025 | u32 new_rx_rss_context; |
| 2026 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2027 | int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, |
| 2028 | false, context_size); |
| 2029 | |
| 2030 | if (rc != 0) |
| 2031 | return rc; |
| 2032 | |
| 2033 | nic_data->rx_rss_context = new_rx_rss_context; |
| 2034 | nic_data->rx_rss_context_exclusive = false; |
| 2035 | efx_set_default_rx_indir_table(efx); |
| 2036 | return 0; |
| 2037 | } |
| 2038 | |
| 2039 | static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, |
| 2040 | const u32 *rx_indir_table) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2041 | { |
| 2042 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2043 | int rc; |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2044 | u32 new_rx_rss_context; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2045 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2046 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || |
| 2047 | !nic_data->rx_rss_context_exclusive) { |
| 2048 | rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, |
| 2049 | true, NULL); |
| 2050 | if (rc == -EOPNOTSUPP) |
| 2051 | return rc; |
| 2052 | else if (rc != 0) |
| 2053 | goto fail1; |
| 2054 | } else { |
| 2055 | new_rx_rss_context = nic_data->rx_rss_context; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2056 | } |
| 2057 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2058 | rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, |
| 2059 | rx_indir_table); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2060 | if (rc != 0) |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2061 | goto fail2; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2062 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2063 | if (nic_data->rx_rss_context != new_rx_rss_context) |
| 2064 | efx_ef10_rx_free_indir_table(efx); |
| 2065 | nic_data->rx_rss_context = new_rx_rss_context; |
| 2066 | nic_data->rx_rss_context_exclusive = true; |
| 2067 | if (rx_indir_table != efx->rx_indir_table) |
| 2068 | memcpy(efx->rx_indir_table, rx_indir_table, |
| 2069 | sizeof(efx->rx_indir_table)); |
| 2070 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2071 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2072 | fail2: |
| 2073 | if (new_rx_rss_context != nic_data->rx_rss_context) |
| 2074 | efx_ef10_free_rss_context(efx, new_rx_rss_context); |
| 2075 | fail1: |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2076 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2077 | return rc; |
| 2078 | } |
| 2079 | |
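/* Push the RSS configuration for a PF.  An exclusive context is preferred;
 * if the firmware cannot allocate one (-ENOBUFS) and this is not a
 * user-requested configuration, fall back to a shared context and warn if
 * its size or indirection table differs from what was wanted.
 */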
| 2080 | static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, |
| 2081 | const u32 *rx_indir_table) |
| 2082 | { |
| 2083 | int rc; |
| 2084 | |
| 2085 | if (efx->rss_spread == 1) |
| 2086 | return 0; |
| 2087 | |
| 2088 | rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table); |
| 2089 | |
| 2090 | if (rc == -ENOBUFS && !user) { |
| 2091 | unsigned context_size; |
| 2092 | bool mismatch = false; |
| 2093 | size_t i; |
| 2094 | |
| 2095 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; |
| 2096 | i++) |
| 2097 | mismatch = rx_indir_table[i] != |
| 2098 | ethtool_rxfh_indir_default(i, efx->rss_spread); |
| 2099 | |
| 2100 | rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); |
| 2101 | if (rc == 0) { |
| 2102 | if (context_size != efx->rss_spread) |
| 2103 | netif_warn(efx, probe, efx->net_dev, |
| 2104 | "Could not allocate an exclusive RSS" |
| 2105 | " context; allocated a shared one of" |
| 2106 | " different size." |
| 2107 | " Wanted %u, got %u.\n", |
| 2108 | efx->rss_spread, context_size); |
| 2109 | else if (mismatch) |
| 2110 | netif_warn(efx, probe, efx->net_dev, |
| 2111 | "Could not allocate an exclusive RSS" |
| 2112 | " context; allocated a shared one but" |
| 2113 | " could not apply custom" |
| 2114 | " indirection.\n"); |
| 2115 | else |
| 2116 | netif_info(efx, probe, efx->net_dev, |
| 2117 | "Could not allocate an exclusive RSS" |
| 2118 | " context; allocated a shared one.\n"); |
| 2119 | } |
| 2120 | } |
| 2121 | return rc; |
| 2122 | } |
| 2123 | |
| 2124 | static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, |
| 2125 | const u32 *rx_indir_table |
| 2126 | __attribute__ ((unused))) |
| 2127 | { |
| 2128 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2129 | |
| 2130 | if (user) |
| 2131 | return -EOPNOTSUPP; |
| 2132 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) |
| 2133 | return 0; |
| 2134 | return efx_ef10_rx_push_shared_rss_config(efx, NULL); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2135 | } |
| 2136 | |
| 2137 | static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) |
| 2138 | { |
| 2139 | return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, |
| 2140 | (rx_queue->ptr_mask + 1) * |
| 2141 | sizeof(efx_qword_t), |
| 2142 | GFP_KERNEL); |
| 2143 | } |
| 2144 | |
| 2145 | static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) |
| 2146 | { |
| 2147 | MCDI_DECLARE_BUF(inbuf, |
| 2148 | MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
| 2149 | EFX_BUF_SIZE)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2150 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
| 2151 | size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; |
| 2152 | struct efx_nic *efx = rx_queue->efx; |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2153 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2154 | size_t inlen; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2155 | dma_addr_t dma_addr; |
| 2156 | int rc; |
| 2157 | int i; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2158 | BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2159 | |
| 2160 | rx_queue->scatter_n = 0; |
| 2161 | rx_queue->scatter_len = 0; |
| 2162 | |
| 2163 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); |
| 2164 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); |
| 2165 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); |
| 2166 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, |
| 2167 | efx_rx_queue_index(rx_queue)); |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 2168 | MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, |
| 2169 | INIT_RXQ_IN_FLAG_PREFIX, 1, |
| 2170 | INIT_RXQ_IN_FLAG_TIMESTAMP, 1); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2171 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2172 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2173 | |
| 2174 | dma_addr = rx_queue->rxd.buf.dma_addr; |
| 2175 | |
| 2176 | netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", |
| 2177 | efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); |
| 2178 | |
| 2179 | for (i = 0; i < entries; ++i) { |
| 2180 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); |
| 2181 | dma_addr += EFX_BUF_SIZE; |
| 2182 | } |
| 2183 | |
| 2184 | inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); |
| 2185 | |
| 2186 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2187 | NULL, 0, NULL); |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 2188 | if (rc) |
| 2189 | netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", |
| 2190 | efx_rx_queue_index(rx_queue)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2191 | } |
| 2192 | |
| 2193 | static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) |
| 2194 | { |
| 2195 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2196 | MCDI_DECLARE_BUF_ERR(outbuf); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2197 | struct efx_nic *efx = rx_queue->efx; |
| 2198 | size_t outlen; |
| 2199 | int rc; |
| 2200 | |
| 2201 | MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, |
| 2202 | efx_rx_queue_index(rx_queue)); |
| 2203 | |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 2204 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2205 | outbuf, sizeof(outbuf), &outlen); |
| 2206 | |
| 2207 | if (rc && rc != -EALREADY) |
| 2208 | goto fail; |
| 2209 | |
| 2210 | return; |
| 2211 | |
| 2212 | fail: |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 2213 | efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, |
| 2214 | outbuf, outlen, rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2215 | } |
| 2216 | |
| 2217 | static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) |
| 2218 | { |
| 2219 | efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); |
| 2220 | } |
| 2221 | |
| 2222 | /* This creates an entry in the RX descriptor queue */ |
| 2223 | static inline void |
| 2224 | efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) |
| 2225 | { |
| 2226 | struct efx_rx_buffer *rx_buf; |
| 2227 | efx_qword_t *rxd; |
| 2228 | |
| 2229 | rxd = efx_rx_desc(rx_queue, index); |
| 2230 | rx_buf = efx_rx_buffer(rx_queue, index); |
| 2231 | EFX_POPULATE_QWORD_2(*rxd, |
| 2232 | ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, |
| 2233 | ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); |
| 2234 | } |
| 2235 | |
| 2236 | static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) |
| 2237 | { |
| 2238 | struct efx_nic *efx = rx_queue->efx; |
| 2239 | unsigned int write_count; |
| 2240 | efx_dword_t reg; |
| 2241 | |
| 2242 | /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ |
| 2243 | write_count = rx_queue->added_count & ~7; |
| 2244 | if (rx_queue->notified_count == write_count) |
| 2245 | return; |
| 2246 | |
| 2247 | do |
| 2248 | efx_ef10_build_rx_desc( |
| 2249 | rx_queue, |
| 2250 | rx_queue->notified_count & rx_queue->ptr_mask); |
| 2251 | while (++rx_queue->notified_count != write_count); |
| 2252 | |
| 2253 | wmb(); |
| 2254 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, |
| 2255 | write_count & rx_queue->ptr_mask); |
| 2256 | efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, |
| 2257 | efx_rx_queue_index(rx_queue)); |
| 2258 | } |
| 2259 | |
| 2260 | static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; |
| 2261 | |
| 2262 | static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) |
| 2263 | { |
| 2264 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
| 2265 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); |
| 2266 | efx_qword_t event; |
| 2267 | |
| 2268 | EFX_POPULATE_QWORD_2(event, |
| 2269 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, |
| 2270 | ESF_DZ_EV_DATA, EFX_EF10_REFILL); |
| 2271 | |
| 2272 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); |
| 2273 | |
| 2274 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has |
| 2275 | * already swapped the data to little-endian order. |
| 2276 | */ |
| 2277 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], |
| 2278 | sizeof(efx_qword_t)); |
| 2279 | |
| 2280 | efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, |
| 2281 | inbuf, sizeof(inbuf), 0, |
| 2282 | efx_ef10_rx_defer_refill_complete, 0); |
| 2283 | } |
| 2284 | |
| 2285 | static void |
| 2286 | efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, |
| 2287 | int rc, efx_dword_t *outbuf, |
| 2288 | size_t outlen_actual) |
| 2289 | { |
| 2290 | /* nothing to do */ |
| 2291 | } |
| 2292 | |
| 2293 | static int efx_ef10_ev_probe(struct efx_channel *channel) |
| 2294 | { |
| 2295 | return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, |
| 2296 | (channel->eventq_mask + 1) * |
| 2297 | sizeof(efx_qword_t), |
| 2298 | GFP_KERNEL); |
| 2299 | } |
| 2300 | |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 2301 | static void efx_ef10_ev_fini(struct efx_channel *channel) |
| 2302 | { |
| 2303 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); |
| 2304 | MCDI_DECLARE_BUF_ERR(outbuf); |
| 2305 | struct efx_nic *efx = channel->efx; |
| 2306 | size_t outlen; |
| 2307 | int rc; |
| 2308 | |
| 2309 | MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); |
| 2310 | |
| 2311 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), |
| 2312 | outbuf, sizeof(outbuf), &outlen); |
| 2313 | |
| 2314 | if (rc && rc != -EALREADY) |
| 2315 | goto fail; |
| 2316 | |
| 2317 | return; |
| 2318 | |
| 2319 | fail: |
| 2320 | efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, |
| 2321 | outbuf, outlen, rc); |
| 2322 | } |
| 2323 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2324 | static int efx_ef10_ev_init(struct efx_channel *channel) |
| 2325 | { |
| 2326 | MCDI_DECLARE_BUF(inbuf, |
| 2327 | MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / |
| 2328 | EFX_BUF_SIZE)); |
| 2329 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN); |
| 2330 | size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; |
| 2331 | struct efx_nic *efx = channel->efx; |
| 2332 | struct efx_ef10_nic_data *nic_data; |
| 2333 | bool supports_rx_merge; |
| 2334 | size_t inlen, outlen; |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 2335 | unsigned int enabled, implemented; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2336 | dma_addr_t dma_addr; |
| 2337 | int rc; |
| 2338 | int i; |
| 2339 | |
| 2340 | nic_data = efx->nic_data; |
| 2341 | supports_rx_merge = |
| 2342 | !!(nic_data->datapath_caps & |
| 2343 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); |
| 2344 | |
| 2345 | /* Fill event queue with all ones (i.e. empty events) */ |
| 2346 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); |
| 2347 | |
| 2348 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); |
| 2349 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); |
| 2350 | /* INIT_EVQ expects index in vector table, not absolute */ |
| 2351 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); |
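| | /* RX/TX event merging is requested unconditionally; cut-through is |
| |  * only enabled when the firmware does not support RX event batching. |
| |  */ |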
| 2352 | MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, |
| 2353 | INIT_EVQ_IN_FLAG_INTERRUPTING, 1, |
| 2354 | INIT_EVQ_IN_FLAG_RX_MERGE, 1, |
| 2355 | INIT_EVQ_IN_FLAG_TX_MERGE, 1, |
| 2356 | INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge); |
| 2357 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, |
| 2358 | MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); |
| 2359 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); |
| 2360 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); |
| 2361 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, |
| 2362 | MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); |
| 2363 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); |
| 2364 | |
| 2365 | dma_addr = channel->eventq.buf.dma_addr; |
| 2366 | for (i = 0; i < entries; ++i) { |
| 2367 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); |
| 2368 | dma_addr += EFX_BUF_SIZE; |
| 2369 | } |
| 2370 | |
| 2371 | inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); |
| 2372 | |
| 2373 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, |
| 2374 | outbuf, sizeof(outbuf), &outlen); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2375 | /* IRQ return is ignored */ |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 2376 | if (channel->channel || rc) |
| 2377 | return rc; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2378 | |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 2379 | /* Successfully created event queue on channel 0 */ |
| 2380 | rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 2381 | if (rc == -ENOSYS) { |
| 2382 | /* GET_WORKAROUNDS predates the bug26807 workaround, so if the |
| 2383 | * command itself is missing (-ENOSYS) the workaround must also be |
| 2384 | * unavailable in this firmware. */ |
| 2385 | nic_data->workaround_26807 = false; |
| 2386 | rc = 0; |
| 2387 | } else if (rc) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2388 | goto fail; |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 2389 | } else { |
| 2390 | nic_data->workaround_26807 = |
| 2391 | !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2392 | |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 2393 | if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && |
| 2394 | !nic_data->workaround_26807) { |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 2395 | unsigned int flags; |
| 2396 | |
Daniel Pieczko | 34ccfe6 | 2015-07-21 15:09:43 +0100 | [diff] [blame] | 2397 | rc = efx_mcdi_set_workaround(efx, |
| 2398 | MC_CMD_WORKAROUND_BUG26807, |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 2399 | true, &flags); |
| 2400 | |
| 2401 | if (!rc) { |
| 2402 | if (flags & |
| 2403 | 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { |
| 2404 | netif_info(efx, drv, efx->net_dev, |
| 2405 | "other functions on NIC have been reset\n"); |
Daniel Pieczko | abd86a5 | 2015-12-04 08:48:39 +0000 | [diff] [blame] | 2406 | |
| 2407 | /* With MCFW v4.6.x and earlier, the |
| 2408 | * boot count will have incremented, |
| 2409 | * so re-read the warm_boot_count |
| 2410 | * value now to ensure this function |
| 2411 | * doesn't think it has changed next |
| 2412 | * time it checks. |
| 2413 | */ |
| 2414 | rc = efx_ef10_get_warm_boot_count(efx); |
| 2415 | if (rc >= 0) { |
| 2416 | nic_data->warm_boot_count = rc; |
| 2417 | rc = 0; |
| 2418 | } |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 2419 | } |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 2420 | nic_data->workaround_26807 = true; |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 2421 | } else if (rc == -EPERM) { |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 2422 | rc = 0; |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 2423 | } |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 2424 | } |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 2425 | } |
| 2426 | |
| 2427 | if (!rc) |
| 2428 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2429 | |
| 2430 | fail: |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 2431 | efx_ef10_ev_fini(channel); |
| 2432 | return rc; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2433 | } |
| 2434 | |
| 2435 | static void efx_ef10_ev_remove(struct efx_channel *channel) |
| 2436 | { |
| 2437 | efx_nic_free_buffer(channel->efx, &channel->eventq.buf); |
| 2438 | } |
| 2439 | |
| 2440 | static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, |
| 2441 | unsigned int rx_queue_label) |
| 2442 | { |
| 2443 | struct efx_nic *efx = rx_queue->efx; |
| 2444 | |
| 2445 | netif_info(efx, hw, efx->net_dev, |
| 2446 | "rx event arrived on queue %d labeled as queue %u\n", |
| 2447 | efx_rx_queue_index(rx_queue), rx_queue_label); |
| 2448 | |
| 2449 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
| 2450 | } |
| 2451 | |
| 2452 | static void |
| 2453 | efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, |
| 2454 | unsigned int actual, unsigned int expected) |
| 2455 | { |
| 2456 | unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; |
| 2457 | struct efx_nic *efx = rx_queue->efx; |
| 2458 | |
| 2459 | netif_info(efx, hw, efx->net_dev, |
| 2460 | "dropped %d events (index=%d expected=%d)\n", |
| 2461 | dropped, actual, expected); |
| 2462 | |
| 2463 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
| 2464 | } |
| 2465 | |
| 2466 | /* A partially received scattered RX packet was aborted; clean up. */ |
| 2467 | static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) |
| 2468 | { |
| 2469 | unsigned int rx_desc_ptr; |
| 2470 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2471 | netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, |
| 2472 | "scattered RX aborted (dropping %u buffers)\n", |
| 2473 | rx_queue->scatter_n); |
| 2474 | |
| 2475 | rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; |
| 2476 | |
| 2477 | efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, |
| 2478 | 0, EFX_RX_PKT_DISCARD); |
| 2479 | |
| 2480 | rx_queue->removed_count += rx_queue->scatter_n; |
| 2481 | rx_queue->scatter_n = 0; |
| 2482 | rx_queue->scatter_len = 0; |
| 2483 | ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; |
| 2484 | } |
| 2485 | |
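| | /* Handle an RX completion event. The event may complete a single |
| |  * packet, another fragment of a scattered packet (tracked via |
| |  * rx_queue->scatter_n/scatter_len), or a merged completion covering |
| |  * several non-scattered packets. Returns the number of packets |
| |  * passed up, which counts against the NAPI quota. |
| |  */ |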
| 2486 | static int efx_ef10_handle_rx_event(struct efx_channel *channel, |
| 2487 | const efx_qword_t *event) |
| 2488 | { |
| 2489 | unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class; |
| 2490 | unsigned int n_descs, n_packets, i; |
| 2491 | struct efx_nic *efx = channel->efx; |
| 2492 | struct efx_rx_queue *rx_queue; |
| 2493 | bool rx_cont; |
| 2494 | u16 flags = 0; |
| 2495 | |
| 2496 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) |
| 2497 | return 0; |
| 2498 | |
| 2499 | /* Basic packet information */ |
| 2500 | rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); |
| 2501 | next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); |
| 2502 | rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); |
| 2503 | rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); |
| 2504 | rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); |
| 2505 | |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 2506 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) |
| 2507 | netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" |
| 2508 | EFX_QWORD_FMT "\n", |
| 2509 | EFX_QWORD_VAL(*event)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2510 | |
| 2511 | rx_queue = efx_channel_get_rx_queue(channel); |
| 2512 | |
| 2513 | if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) |
| 2514 | efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); |
| 2515 | |
| 2516 | n_descs = ((next_ptr_lbits - rx_queue->removed_count) & |
| 2517 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); |
| 2518 | |
| 2519 | if (n_descs != rx_queue->scatter_n + 1) { |
Ben Hutchings | 92a0416 | 2013-09-24 23:21:57 +0100 | [diff] [blame] | 2520 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2521 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2522 | /* detect rx abort */ |
| 2523 | if (unlikely(n_descs == rx_queue->scatter_n)) { |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 2524 | if (rx_queue->scatter_n == 0 || rx_bytes != 0) |
| 2525 | netdev_WARN(efx->net_dev, |
| 2526 | "invalid RX abort: scatter_n=%u event=" |
| 2527 | EFX_QWORD_FMT "\n", |
| 2528 | rx_queue->scatter_n, |
| 2529 | EFX_QWORD_VAL(*event)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2530 | efx_ef10_handle_rx_abort(rx_queue); |
| 2531 | return 0; |
| 2532 | } |
| 2533 | |
Ben Hutchings | 92a0416 | 2013-09-24 23:21:57 +0100 | [diff] [blame] | 2534 | /* Check that RX completion merging is valid, i.e. |
| 2535 | * the current firmware supports it and this is a |
| 2536 | * non-scattered packet. |
| 2537 | */ |
| 2538 | if (!(nic_data->datapath_caps & |
| 2539 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || |
| 2540 | rx_queue->scatter_n != 0 || rx_cont) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2541 | efx_ef10_handle_rx_bad_lbits( |
| 2542 | rx_queue, next_ptr_lbits, |
| 2543 | (rx_queue->removed_count + |
| 2544 | rx_queue->scatter_n + 1) & |
| 2545 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); |
| 2546 | return 0; |
| 2547 | } |
| 2548 | |
| 2549 | /* Merged completion for multiple non-scattered packets */ |
| 2550 | rx_queue->scatter_n = 1; |
| 2551 | rx_queue->scatter_len = 0; |
| 2552 | n_packets = n_descs; |
| 2553 | ++channel->n_rx_merge_events; |
| 2554 | channel->n_rx_merge_packets += n_packets; |
| 2555 | flags |= EFX_RX_PKT_PREFIX_LEN; |
| 2556 | } else { |
| 2557 | ++rx_queue->scatter_n; |
| 2558 | rx_queue->scatter_len += rx_bytes; |
| 2559 | if (rx_cont) |
| 2560 | return 0; |
| 2561 | n_packets = 1; |
| 2562 | } |
| 2563 | |
| 2564 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR))) |
| 2565 | flags |= EFX_RX_PKT_DISCARD; |
| 2566 | |
| 2567 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) { |
| 2568 | channel->n_rx_ip_hdr_chksum_err += n_packets; |
| 2569 | } else if (unlikely(EFX_QWORD_FIELD(*event, |
| 2570 | ESF_DZ_RX_TCPUDP_CKSUM_ERR))) { |
| 2571 | channel->n_rx_tcp_udp_chksum_err += n_packets; |
| 2572 | } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP || |
| 2573 | rx_l4_class == ESE_DZ_L4_CLASS_UDP) { |
| 2574 | flags |= EFX_RX_PKT_CSUMMED; |
| 2575 | } |
| 2576 | |
| 2577 | if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) |
| 2578 | flags |= EFX_RX_PKT_TCP; |
| 2579 | |
| 2580 | channel->irq_mod_score += 2 * n_packets; |
| 2581 | |
| 2582 | /* Handle received packet(s) */ |
| 2583 | for (i = 0; i < n_packets; i++) { |
| 2584 | efx_rx_packet(rx_queue, |
| 2585 | rx_queue->removed_count & rx_queue->ptr_mask, |
| 2586 | rx_queue->scatter_n, rx_queue->scatter_len, |
| 2587 | flags); |
| 2588 | rx_queue->removed_count += rx_queue->scatter_n; |
| 2589 | } |
| 2590 | |
| 2591 | rx_queue->scatter_n = 0; |
| 2592 | rx_queue->scatter_len = 0; |
| 2593 | |
| 2594 | return n_packets; |
| 2595 | } |
| 2596 | |
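| | /* Handle a TX completion event: look up the queue from the event's |
| |  * label, work out how many descriptors have completed since the last |
| |  * read_count, and report the completion to the core TX path. |
| |  */ |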
| 2597 | static int |
| 2598 | efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) |
| 2599 | { |
| 2600 | struct efx_nic *efx = channel->efx; |
| 2601 | struct efx_tx_queue *tx_queue; |
| 2602 | unsigned int tx_ev_desc_ptr; |
| 2603 | unsigned int tx_ev_q_label; |
| 2604 | int tx_descs = 0; |
| 2605 | |
| 2606 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) |
| 2607 | return 0; |
| 2608 | |
| 2609 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) |
| 2610 | return 0; |
| 2611 | |
| 2612 | /* Transmit completion */ |
| 2613 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); |
| 2614 | tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); |
| 2615 | tx_queue = efx_channel_get_tx_queue(channel, |
| 2616 | tx_ev_q_label % EFX_TXQ_TYPES); |
| 2617 | tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & |
| 2618 | tx_queue->ptr_mask); |
| 2619 | efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); |
| 2620 | |
| 2621 | return tx_descs; |
| 2622 | } |
| 2623 | |
| 2624 | static void |
| 2625 | efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) |
| 2626 | { |
| 2627 | struct efx_nic *efx = channel->efx; |
| 2628 | int subcode; |
| 2629 | |
| 2630 | subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); |
| 2631 | |
| 2632 | switch (subcode) { |
| 2633 | case ESE_DZ_DRV_TIMER_EV: |
| 2634 | case ESE_DZ_DRV_WAKE_UP_EV: |
| 2635 | break; |
| 2636 | case ESE_DZ_DRV_START_UP_EV: |
| 2637 | /* Event queue initialisation complete; nothing to do. */ |
| 2638 | break; |
| 2639 | default: |
| 2640 | netif_err(efx, hw, efx->net_dev, |
| 2641 | "channel %d unknown driver event type %d" |
| 2642 | " (data " EFX_QWORD_FMT ")\n", |
| 2643 | channel->channel, subcode, |
| 2644 | EFX_QWORD_VAL(*event)); |
| 2645 | |
| 2646 | } |
| 2647 | } |
| 2648 | |
| 2649 | static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, |
| 2650 | efx_qword_t *event) |
| 2651 | { |
| 2652 | struct efx_nic *efx = channel->efx; |
| 2653 | u32 subcode; |
| 2654 | |
| 2655 | subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); |
| 2656 | |
| 2657 | switch (subcode) { |
| 2658 | case EFX_EF10_TEST: |
| 2659 | channel->event_test_cpu = raw_smp_processor_id(); |
| 2660 | break; |
| 2661 | case EFX_EF10_REFILL: |
| 2662 | /* The queue must be empty, so we won't receive any RX |
| 2663 | * events and efx_process_channel() won't refill the |
| 2664 | * queue. Refill it here. |
| 2665 | */ |
Jon Cooper | cce2879 | 2013-10-02 11:04:14 +0100 | [diff] [blame] | 2666 | efx_fast_push_rx_descriptors(&channel->rx_queue, true); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2667 | break; |
| 2668 | default: |
| 2669 | netif_err(efx, hw, efx->net_dev, |
| 2670 | "channel %d unknown driver event type %u" |
| 2671 | " (data " EFX_QWORD_FMT ")\n", |
| 2672 | channel->channel, (unsigned) subcode, |
| 2673 | EFX_QWORD_VAL(*event)); |
| 2674 | } |
| 2675 | } |
| 2676 | |
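| | /* Event processing loop. Events are consumed until an empty (all-ones) |
| |  * entry is found; each processed event is overwritten with all-ones so |
| |  * it is not seen again. Processing stops early once the NAPI quota is |
| |  * spent. |
| |  */ |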
| 2677 | static int efx_ef10_ev_process(struct efx_channel *channel, int quota) |
| 2678 | { |
| 2679 | struct efx_nic *efx = channel->efx; |
| 2680 | efx_qword_t event, *p_event; |
| 2681 | unsigned int read_ptr; |
| 2682 | int ev_code; |
| 2683 | int tx_descs = 0; |
| 2684 | int spent = 0; |
| 2685 | |
Eric W. Biederman | 75363a4 | 2014-03-14 18:11:22 -0700 | [diff] [blame] | 2686 | if (quota <= 0) |
| 2687 | return spent; |
| 2688 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2689 | read_ptr = channel->eventq_read_ptr; |
| 2690 | |
| 2691 | for (;;) { |
| 2692 | p_event = efx_event(channel, read_ptr); |
| 2693 | event = *p_event; |
| 2694 | |
| 2695 | if (!efx_event_present(&event)) |
| 2696 | break; |
| 2697 | |
| 2698 | EFX_SET_QWORD(*p_event); |
| 2699 | |
| 2700 | ++read_ptr; |
| 2701 | |
| 2702 | ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); |
| 2703 | |
| 2704 | netif_vdbg(efx, drv, efx->net_dev, |
| 2705 | "processing event on %d " EFX_QWORD_FMT "\n", |
| 2706 | channel->channel, EFX_QWORD_VAL(event)); |
| 2707 | |
| 2708 | switch (ev_code) { |
| 2709 | case ESE_DZ_EV_CODE_MCDI_EV: |
| 2710 | efx_mcdi_process_event(channel, &event); |
| 2711 | break; |
| 2712 | case ESE_DZ_EV_CODE_RX_EV: |
| 2713 | spent += efx_ef10_handle_rx_event(channel, &event); |
| 2714 | if (spent >= quota) { |
| 2715 | /* XXX can we split a merged event to |
| 2716 | * avoid going over-quota? |
| 2717 | */ |
| 2718 | spent = quota; |
| 2719 | goto out; |
| 2720 | } |
| 2721 | break; |
| 2722 | case ESE_DZ_EV_CODE_TX_EV: |
| 2723 | tx_descs += efx_ef10_handle_tx_event(channel, &event); |
| 2724 | if (tx_descs > efx->txq_entries) { |
| 2725 | spent = quota; |
| 2726 | goto out; |
| 2727 | } else if (++spent == quota) { |
| 2728 | goto out; |
| 2729 | } |
| 2730 | break; |
| 2731 | case ESE_DZ_EV_CODE_DRIVER_EV: |
| 2732 | efx_ef10_handle_driver_event(channel, &event); |
| 2733 | if (++spent == quota) |
| 2734 | goto out; |
| 2735 | break; |
| 2736 | case EFX_EF10_DRVGEN_EV: |
| 2737 | efx_ef10_handle_driver_generated_event(channel, &event); |
| 2738 | break; |
| 2739 | default: |
| 2740 | netif_err(efx, hw, efx->net_dev, |
| 2741 | "channel %d unknown event type %d" |
| 2742 | " (data " EFX_QWORD_FMT ")\n", |
| 2743 | channel->channel, ev_code, |
| 2744 | EFX_QWORD_VAL(event)); |
| 2745 | } |
| 2746 | } |
| 2747 | |
| 2748 | out: |
| 2749 | channel->eventq_read_ptr = read_ptr; |
| 2750 | return spent; |
| 2751 | } |
| 2752 | |
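| | /* Acknowledge processed events by writing the read pointer back to the |
| |  * NIC. With workaround 35388 the pointer is too wide for a single |
| |  * write, so it is written as high and low halves through the indirect |
| |  * EVQ register. |
| |  */ |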
| 2753 | static void efx_ef10_ev_read_ack(struct efx_channel *channel) |
| 2754 | { |
| 2755 | struct efx_nic *efx = channel->efx; |
| 2756 | efx_dword_t rptr; |
| 2757 | |
| 2758 | if (EFX_EF10_WORKAROUND_35388(efx)) { |
| 2759 | BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < |
| 2760 | (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); |
| 2761 | BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > |
| 2762 | (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); |
| 2763 | |
| 2764 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, |
| 2765 | EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, |
| 2766 | ERF_DD_EVQ_IND_RPTR, |
| 2767 | (channel->eventq_read_ptr & |
| 2768 | channel->eventq_mask) >> |
| 2769 | ERF_DD_EVQ_IND_RPTR_WIDTH); |
| 2770 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, |
| 2771 | channel->channel); |
| 2772 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, |
| 2773 | EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, |
| 2774 | ERF_DD_EVQ_IND_RPTR, |
| 2775 | channel->eventq_read_ptr & |
| 2776 | ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); |
| 2777 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, |
| 2778 | channel->channel); |
| 2779 | } else { |
| 2780 | EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, |
| 2781 | channel->eventq_read_ptr & |
| 2782 | channel->eventq_mask); |
| 2783 | efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); |
| 2784 | } |
| 2785 | } |
| 2786 | |
| 2787 | static void efx_ef10_ev_test_generate(struct efx_channel *channel) |
| 2788 | { |
| 2789 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); |
| 2790 | struct efx_nic *efx = channel->efx; |
| 2791 | efx_qword_t event; |
| 2792 | int rc; |
| 2793 | |
| 2794 | EFX_POPULATE_QWORD_2(event, |
| 2795 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, |
| 2796 | ESF_DZ_EV_DATA, EFX_EF10_TEST); |
| 2797 | |
| 2798 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); |
| 2799 | |
| 2800 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has |
| 2801 | * already swapped the data to little-endian order. |
| 2802 | */ |
| 2803 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], |
| 2804 | sizeof(efx_qword_t)); |
| 2805 | |
| 2806 | rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), |
| 2807 | NULL, 0, NULL); |
| 2808 | if (rc != 0) |
| 2809 | goto fail; |
| 2810 | |
| 2811 | return; |
| 2812 | |
| 2813 | fail: |
| 2814 | WARN_ON(true); |
| 2815 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
| 2816 | } |
| 2817 | |
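| | /* Each TX/RX queue flush completion decrements active_queues; |
| |  * efx_ef10_fini_dmaq() below waits on flush_wq until the count |
| |  * reaches zero or the flush times out. |
| |  */ |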
| 2818 | void efx_ef10_handle_drain_event(struct efx_nic *efx) |
| 2819 | { |
| 2820 | if (atomic_dec_and_test(&efx->active_queues)) |
| 2821 | wake_up(&efx->flush_wq); |
| 2822 | |
| 2823 | WARN_ON(atomic_read(&efx->active_queues) < 0); |
| 2824 | } |
| 2825 | |
| 2826 | static int efx_ef10_fini_dmaq(struct efx_nic *efx) |
| 2827 | { |
| 2828 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2829 | struct efx_channel *channel; |
| 2830 | struct efx_tx_queue *tx_queue; |
| 2831 | struct efx_rx_queue *rx_queue; |
| 2832 | int pending; |
| 2833 | |
| 2834 | /* If the MC has just rebooted, the TX/RX queues will have already been |
| 2835 | * torn down, but efx->active_queues needs to be set to zero. |
| 2836 | */ |
| 2837 | if (nic_data->must_realloc_vis) { |
| 2838 | atomic_set(&efx->active_queues, 0); |
| 2839 | return 0; |
| 2840 | } |
| 2841 | |
| 2842 | /* Do not attempt to write to the NIC during EEH recovery */ |
| 2843 | if (efx->state != STATE_RECOVERY) { |
| 2844 | efx_for_each_channel(channel, efx) { |
| 2845 | efx_for_each_channel_rx_queue(rx_queue, channel) |
| 2846 | efx_ef10_rx_fini(rx_queue); |
| 2847 | efx_for_each_channel_tx_queue(tx_queue, channel) |
| 2848 | efx_ef10_tx_fini(tx_queue); |
| 2849 | } |
| 2850 | |
| 2851 | wait_event_timeout(efx->flush_wq, |
| 2852 | atomic_read(&efx->active_queues) == 0, |
| 2853 | msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); |
| 2854 | pending = atomic_read(&efx->active_queues); |
| 2855 | if (pending) { |
| 2856 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", |
| 2857 | pending); |
| 2858 | return -ETIMEDOUT; |
| 2859 | } |
| 2860 | } |
| 2861 | |
| 2862 | return 0; |
| 2863 | } |
| 2864 | |
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 2865 | static void efx_ef10_prepare_flr(struct efx_nic *efx) |
| 2866 | { |
| 2867 | atomic_set(&efx->active_queues, 0); |
| 2868 | } |
| 2869 | |
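| | /* Two filter specs match the same traffic if they have the same match |
| |  * flags, the same RX/TX direction and identical match values. The |
| |  * match values are stored contiguously from outer_vid to the end of |
| |  * struct efx_filter_spec, so they can be compared (and hashed, below) |
| |  * as a single block. |
| |  */ |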
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2870 | static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, |
| 2871 | const struct efx_filter_spec *right) |
| 2872 | { |
| 2873 | if ((left->match_flags ^ right->match_flags) | |
| 2874 | ((left->flags ^ right->flags) & |
| 2875 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) |
| 2876 | return false; |
| 2877 | |
| 2878 | return memcmp(&left->outer_vid, &right->outer_vid, |
| 2879 | sizeof(struct efx_filter_spec) - |
| 2880 | offsetof(struct efx_filter_spec, outer_vid)) == 0; |
| 2881 | } |
| 2882 | |
| 2883 | static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) |
| 2884 | { |
| 2885 | BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); |
| 2886 | return jhash2((const u32 *)&spec->outer_vid, |
| 2887 | (sizeof(struct efx_filter_spec) - |
| 2888 | offsetof(struct efx_filter_spec, outer_vid)) / 4, |
| 2889 | 0); |
| 2890 | /* XXX should we randomise the initval? */ |
| 2891 | } |
| 2892 | |
| 2893 | /* Decide whether a filter should be exclusive or else should allow |
| 2894 | * delivery to additional recipients. Currently we decide that |
| 2895 | * filters for specific local unicast MAC and IP addresses are |
| 2896 | * exclusive. |
| 2897 | */ |
| 2898 | static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) |
| 2899 | { |
| 2900 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && |
| 2901 | !is_multicast_ether_addr(spec->loc_mac)) |
| 2902 | return true; |
| 2903 | |
| 2904 | if ((spec->match_flags & |
| 2905 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == |
| 2906 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { |
| 2907 | if (spec->ether_type == htons(ETH_P_IP) && |
| 2908 | !ipv4_is_multicast(spec->loc_host[0])) |
| 2909 | return true; |
| 2910 | if (spec->ether_type == htons(ETH_P_IPV6) && |
| 2911 | ((const u8 *)spec->loc_host)[0] != 0xff) |
| 2912 | return true; |
| 2913 | } |
| 2914 | |
| 2915 | return false; |
| 2916 | } |
| 2917 | |
| 2918 | static struct efx_filter_spec * |
| 2919 | efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, |
| 2920 | unsigned int filter_idx) |
| 2921 | { |
| 2922 | return (struct efx_filter_spec *)(table->entry[filter_idx].spec & |
| 2923 | ~EFX_EF10_FILTER_FLAGS); |
| 2924 | } |
| 2925 | |
| 2926 | static unsigned int |
| 2927 | efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, |
| 2928 | unsigned int filter_idx) |
| 2929 | { |
| 2930 | return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; |
| 2931 | } |
| 2932 | |
| 2933 | static void |
| 2934 | efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, |
| 2935 | unsigned int filter_idx, |
| 2936 | const struct efx_filter_spec *spec, |
| 2937 | unsigned int flags) |
| 2938 | { |
| 2939 | table->entry[filter_idx].spec = (unsigned long)spec | flags; |
| 2940 | } |
| 2941 | |
| 2942 | static void efx_ef10_filter_push_prep(struct efx_nic *efx, |
| 2943 | const struct efx_filter_spec *spec, |
| 2944 | efx_dword_t *inbuf, u64 handle, |
| 2945 | bool replacing) |
| 2946 | { |
| 2947 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 2948 | u32 flags = spec->flags; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2949 | |
| 2950 | memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); |
| 2951 | |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 2952 | /* Remove RSS flag if we don't have an RSS context. */ |
| 2953 | if (flags & EFX_FILTER_FLAG_RX_RSS && |
| 2954 | spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT && |
| 2955 | nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) |
| 2956 | flags &= ~EFX_FILTER_FLAG_RX_RSS; |
| 2957 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2958 | if (replacing) { |
| 2959 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 2960 | MC_CMD_FILTER_OP_IN_OP_REPLACE); |
| 2961 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); |
| 2962 | } else { |
| 2963 | u32 match_fields = 0; |
| 2964 | |
| 2965 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 2966 | efx_ef10_filter_is_exclusive(spec) ? |
| 2967 | MC_CMD_FILTER_OP_IN_OP_INSERT : |
| 2968 | MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); |
| 2969 | |
| 2970 | /* Convert match flags and values. Unlike almost |
| 2971 | * everything else in MCDI, these fields are in |
| 2972 | * network byte order. |
| 2973 | */ |
| 2974 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) |
| 2975 | match_fields |= |
| 2976 | is_multicast_ether_addr(spec->loc_mac) ? |
| 2977 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN : |
| 2978 | 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN; |
| 2979 | #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ |
| 2980 | if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ |
| 2981 | match_fields |= \ |
| 2982 | 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ |
| 2983 | mcdi_field ## _LBN; \ |
| 2984 | BUILD_BUG_ON( \ |
| 2985 | MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ |
| 2986 | sizeof(spec->gen_field)); \ |
| 2987 | memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ |
| 2988 | &spec->gen_field, sizeof(spec->gen_field)); \ |
| 2989 | } |
| 2990 | COPY_FIELD(REM_HOST, rem_host, SRC_IP); |
| 2991 | COPY_FIELD(LOC_HOST, loc_host, DST_IP); |
| 2992 | COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); |
| 2993 | COPY_FIELD(REM_PORT, rem_port, SRC_PORT); |
| 2994 | COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); |
| 2995 | COPY_FIELD(LOC_PORT, loc_port, DST_PORT); |
| 2996 | COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); |
| 2997 | COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); |
| 2998 | COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); |
| 2999 | COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); |
| 3000 | #undef COPY_FIELD |
| 3001 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, |
| 3002 | match_fields); |
| 3003 | } |
| 3004 | |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 3005 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3006 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, |
| 3007 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? |
| 3008 | MC_CMD_FILTER_OP_IN_RX_DEST_DROP : |
| 3009 | MC_CMD_FILTER_OP_IN_RX_DEST_HOST); |
Shradha Shah | e3d3629 | 2015-05-06 00:56:24 +0100 | [diff] [blame] | 3010 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3011 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, |
| 3012 | MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); |
Ben Hutchings | a0bc348 | 2013-12-16 18:56:24 +0000 | [diff] [blame] | 3013 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, |
| 3014 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? |
| 3015 | 0 : spec->dmaq_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3016 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 3017 | (flags & EFX_FILTER_FLAG_RX_RSS) ? |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3018 | MC_CMD_FILTER_OP_IN_RX_MODE_RSS : |
| 3019 | MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 3020 | if (flags & EFX_FILTER_FLAG_RX_RSS) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3021 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, |
| 3022 | spec->rss_context != |
| 3023 | EFX_FILTER_RSS_CONTEXT_DEFAULT ? |
| 3024 | spec->rss_context : nic_data->rx_rss_context); |
| 3025 | } |
| 3026 | |
| 3027 | static int efx_ef10_filter_push(struct efx_nic *efx, |
| 3028 | const struct efx_filter_spec *spec, |
| 3029 | u64 *handle, bool replacing) |
| 3030 | { |
| 3031 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); |
| 3032 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN); |
| 3033 | int rc; |
| 3034 | |
| 3035 | efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); |
| 3036 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), |
| 3037 | outbuf, sizeof(outbuf), NULL); |
| 3038 | if (rc == 0) |
| 3039 | *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); |
Ben Hutchings | 065e64c | 2013-10-09 14:17:27 +0100 | [diff] [blame] | 3040 | if (rc == -ENOSPC) |
| 3041 | rc = -EBUSY; /* to match efx_farch_filter_insert() */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3042 | return rc; |
| 3043 | } |
| 3044 | |
| 3045 | static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, |
| 3046 | enum efx_filter_match_flags match_flags) |
| 3047 | { |
| 3048 | unsigned int match_pri; |
| 3049 | |
| 3050 | for (match_pri = 0; |
| 3051 | match_pri < table->rx_match_count; |
| 3052 | match_pri++) |
| 3053 | if (table->rx_match_flags[match_pri] == match_flags) |
| 3054 | return match_pri; |
| 3055 | |
| 3056 | return -EPROTONOSUPPORT; |
| 3057 | } |
| 3058 | |
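| | /* Insert a filter: hash the spec and probe up to |
| |  * EFX_EF10_FILTER_SEARCH_LIMIT slots in the software table for an |
| |  * equal filter or a free slot, mark the chosen entry busy, push the |
| |  * filter to the firmware outside the lock, then finalise the software |
| |  * entry and remove any lower-priority multicast recipients it |
| |  * supersedes. |
| |  */ |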
| 3059 | static s32 efx_ef10_filter_insert(struct efx_nic *efx, |
| 3060 | struct efx_filter_spec *spec, |
| 3061 | bool replace_equal) |
| 3062 | { |
| 3063 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3064 | DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); |
| 3065 | struct efx_filter_spec *saved_spec; |
| 3066 | unsigned int match_pri, hash; |
| 3067 | unsigned int priv_flags; |
| 3068 | bool replacing = false; |
| 3069 | int ins_index = -1; |
| 3070 | DEFINE_WAIT(wait); |
| 3071 | bool is_mc_recip; |
| 3072 | s32 rc; |
| 3073 | |
| 3074 | /* For now, only support RX filters */ |
| 3075 | if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != |
| 3076 | EFX_FILTER_FLAG_RX) |
| 3077 | return -EINVAL; |
| 3078 | |
| 3079 | rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); |
| 3080 | if (rc < 0) |
| 3081 | return rc; |
| 3082 | match_pri = rc; |
| 3083 | |
| 3084 | hash = efx_ef10_filter_hash(spec); |
| 3085 | is_mc_recip = efx_filter_is_mc_recipient(spec); |
| 3086 | if (is_mc_recip) |
| 3087 | bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); |
| 3088 | |
| 3089 | /* Find any existing filters with the same match tuple or |
| 3090 | * else a free slot to insert at. If any of them are busy, |
| 3091 | * we have to wait and retry. |
| 3092 | */ |
| 3093 | for (;;) { |
| 3094 | unsigned int depth = 1; |
| 3095 | unsigned int i; |
| 3096 | |
| 3097 | spin_lock_bh(&efx->filter_lock); |
| 3098 | |
| 3099 | for (;;) { |
| 3100 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 3101 | saved_spec = efx_ef10_filter_entry_spec(table, i); |
| 3102 | |
| 3103 | if (!saved_spec) { |
| 3104 | if (ins_index < 0) |
| 3105 | ins_index = i; |
| 3106 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { |
| 3107 | if (table->entry[i].spec & |
| 3108 | EFX_EF10_FILTER_FLAG_BUSY) |
| 3109 | break; |
| 3110 | if (spec->priority < saved_spec->priority && |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3111 | spec->priority != EFX_FILTER_PRI_AUTO) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3112 | rc = -EPERM; |
| 3113 | goto out_unlock; |
| 3114 | } |
| 3115 | if (!is_mc_recip) { |
| 3116 | /* Exclusive filter, so this is the only possible match */ |
| 3117 | if (spec->priority == |
| 3118 | saved_spec->priority && |
| 3119 | !replace_equal) { |
| 3120 | rc = -EEXIST; |
| 3121 | goto out_unlock; |
| 3122 | } |
| 3123 | ins_index = i; |
| 3124 | goto found; |
| 3125 | } else if (spec->priority > |
| 3126 | saved_spec->priority || |
| 3127 | (spec->priority == |
| 3128 | saved_spec->priority && |
| 3129 | replace_equal)) { |
| 3130 | if (ins_index < 0) |
| 3131 | ins_index = i; |
| 3132 | else |
| 3133 | __set_bit(depth, mc_rem_map); |
| 3134 | } |
| 3135 | } |
| 3136 | |
| 3137 | /* Once we reach the maximum search depth, use |
| 3138 | * the first suitable slot or return -EBUSY if |
| 3139 | * there was none |
| 3140 | */ |
| 3141 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { |
| 3142 | if (ins_index < 0) { |
| 3143 | rc = -EBUSY; |
| 3144 | goto out_unlock; |
| 3145 | } |
| 3146 | goto found; |
| 3147 | } |
| 3148 | |
| 3149 | ++depth; |
| 3150 | } |
| 3151 | |
| 3152 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); |
| 3153 | spin_unlock_bh(&efx->filter_lock); |
| 3154 | schedule(); |
| 3155 | } |
| 3156 | |
| 3157 | found: |
| 3158 | /* Create a software table entry if necessary, and mark it |
| 3159 | * busy. We might yet fail to insert, but any attempt to |
| 3160 | * insert a conflicting filter while we're waiting for the |
| 3161 | * firmware must find the busy entry. |
| 3162 | */ |
| 3163 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); |
| 3164 | if (saved_spec) { |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3165 | if (spec->priority == EFX_FILTER_PRI_AUTO && |
| 3166 | saved_spec->priority >= EFX_FILTER_PRI_AUTO) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3167 | /* Just make sure it won't be removed */ |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3168 | if (saved_spec->priority > EFX_FILTER_PRI_AUTO) |
| 3169 | saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3170 | table->entry[ins_index].spec &= |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 3171 | ~EFX_EF10_FILTER_FLAG_AUTO_OLD; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3172 | rc = ins_index; |
| 3173 | goto out_unlock; |
| 3174 | } |
| 3175 | replacing = true; |
| 3176 | priv_flags = efx_ef10_filter_entry_flags(table, ins_index); |
| 3177 | } else { |
| 3178 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); |
| 3179 | if (!saved_spec) { |
| 3180 | rc = -ENOMEM; |
| 3181 | goto out_unlock; |
| 3182 | } |
| 3183 | *saved_spec = *spec; |
| 3184 | priv_flags = 0; |
| 3185 | } |
| 3186 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, |
| 3187 | priv_flags | EFX_EF10_FILTER_FLAG_BUSY); |
| 3188 | |
| 3189 | /* Mark lower-priority multicast recipients busy prior to removal */ |
| 3190 | if (is_mc_recip) { |
| 3191 | unsigned int depth, i; |
| 3192 | |
| 3193 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { |
| 3194 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 3195 | if (test_bit(depth, mc_rem_map)) |
| 3196 | table->entry[i].spec |= |
| 3197 | EFX_EF10_FILTER_FLAG_BUSY; |
| 3198 | } |
| 3199 | } |
| 3200 | |
| 3201 | spin_unlock_bh(&efx->filter_lock); |
| 3202 | |
| 3203 | rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, |
| 3204 | replacing); |
| 3205 | |
| 3206 | /* Finalise the software table entry */ |
| 3207 | spin_lock_bh(&efx->filter_lock); |
| 3208 | if (rc == 0) { |
| 3209 | if (replacing) { |
| 3210 | /* Update the fields that may differ */ |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3211 | if (saved_spec->priority == EFX_FILTER_PRI_AUTO) |
| 3212 | saved_spec->flags |= |
| 3213 | EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3214 | saved_spec->priority = spec->priority; |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3215 | saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3216 | saved_spec->flags |= spec->flags; |
| 3217 | saved_spec->rss_context = spec->rss_context; |
| 3218 | saved_spec->dmaq_id = spec->dmaq_id; |
| 3219 | } |
| 3220 | } else if (!replacing) { |
| 3221 | kfree(saved_spec); |
| 3222 | saved_spec = NULL; |
| 3223 | } |
| 3224 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); |
| 3225 | |
| 3226 | /* Remove and finalise entries for lower-priority multicast |
| 3227 | * recipients |
| 3228 | */ |
| 3229 | if (is_mc_recip) { |
| 3230 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); |
| 3231 | unsigned int depth, i; |
| 3232 | |
| 3233 | memset(inbuf, 0, sizeof(inbuf)); |
| 3234 | |
| 3235 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { |
| 3236 | if (!test_bit(depth, mc_rem_map)) |
| 3237 | continue; |
| 3238 | |
| 3239 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 3240 | saved_spec = efx_ef10_filter_entry_spec(table, i); |
| 3241 | priv_flags = efx_ef10_filter_entry_flags(table, i); |
| 3242 | |
| 3243 | if (rc == 0) { |
| 3244 | spin_unlock_bh(&efx->filter_lock); |
| 3245 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 3246 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); |
| 3247 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 3248 | table->entry[i].handle); |
| 3249 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, |
| 3250 | inbuf, sizeof(inbuf), |
| 3251 | NULL, 0, NULL); |
| 3252 | spin_lock_bh(&efx->filter_lock); |
| 3253 | } |
| 3254 | |
| 3255 | if (rc == 0) { |
| 3256 | kfree(saved_spec); |
| 3257 | saved_spec = NULL; |
| 3258 | priv_flags = 0; |
| 3259 | } else { |
| 3260 | priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; |
| 3261 | } |
| 3262 | efx_ef10_filter_set_entry(table, i, saved_spec, |
| 3263 | priv_flags); |
| 3264 | } |
| 3265 | } |
| 3266 | |
| 3267 | /* If successful, return the inserted filter ID */ |
| 3268 | if (rc == 0) |
| 3269 | rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; |
| 3270 | |
| 3271 | wake_up_all(&table->waitq); |
| 3272 | out_unlock: |
| 3273 | spin_unlock_bh(&efx->filter_lock); |
| 3274 | finish_wait(&table->waitq, &wait); |
| 3275 | return rc; |
| 3276 | } |
| 3277 | |
Fengguang Wu | 9fd8095d | 2013-08-31 06:54:05 +0800 | [diff] [blame] | 3278 | static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3279 | { |
| 3280 | /* no need to do anything here on EF10 */ |
| 3281 | } |
| 3282 | |
| 3283 | /* Remove a filter. |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 3284 | * If by_index is false, remove by filter ID; |
| 3285 | * if by_index is true, remove by table index. |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3286 | * The filter ID may come from userland and must be range-checked. |
| 3287 | */ |
| 3288 | static int efx_ef10_filter_remove_internal(struct efx_nic *efx, |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 3289 | unsigned int priority_mask, |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 3290 | u32 filter_id, bool by_index) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3291 | { |
| 3292 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; |
| 3293 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3294 | MCDI_DECLARE_BUF(inbuf, |
| 3295 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + |
| 3296 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); |
| 3297 | struct efx_filter_spec *spec; |
| 3298 | DEFINE_WAIT(wait); |
| 3299 | int rc; |
| 3300 | |
| 3301 | /* Find the software table entry and mark it busy. Don't |
| 3302 | * remove it yet; any attempt to update while we're waiting |
| 3303 | * for the firmware must find the busy entry. |
| 3304 | */ |
| 3305 | for (;;) { |
| 3306 | spin_lock_bh(&efx->filter_lock); |
| 3307 | if (!(table->entry[filter_idx].spec & |
| 3308 | EFX_EF10_FILTER_FLAG_BUSY)) |
| 3309 | break; |
| 3310 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); |
| 3311 | spin_unlock_bh(&efx->filter_lock); |
| 3312 | schedule(); |
| 3313 | } |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3314 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3315 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3316 | if (!spec || |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 3317 | (!by_index && |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3318 | efx_ef10_filter_rx_match_pri(table, spec->match_flags) != |
| 3319 | filter_id / HUNT_FILTER_TBL_ROWS)) { |
| 3320 | rc = -ENOENT; |
| 3321 | goto out_unlock; |
| 3322 | } |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3323 | |
| 3324 | if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 3325 | priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3326 | /* Just remove flags */ |
| 3327 | spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 3328 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3329 | rc = 0; |
| 3330 | goto out_unlock; |
| 3331 | } |
| 3332 | |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 3333 | if (!(priority_mask & (1U << spec->priority))) { |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3334 | rc = -ENOENT; |
| 3335 | goto out_unlock; |
| 3336 | } |
| 3337 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3338 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; |
| 3339 | spin_unlock_bh(&efx->filter_lock); |
| 3340 | |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3341 | if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 3342 | /* Reset to an automatic filter */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3343 | |
| 3344 | struct efx_filter_spec new_spec = *spec; |
| 3345 | |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3346 | new_spec.priority = EFX_FILTER_PRI_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3347 | new_spec.flags = (EFX_FILTER_FLAG_RX | |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 3348 | (efx_rss_enabled(efx) ? |
| 3349 | EFX_FILTER_FLAG_RX_RSS : 0)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3350 | new_spec.dmaq_id = 0; |
| 3351 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; |
| 3352 | rc = efx_ef10_filter_push(efx, &new_spec, |
| 3353 | &table->entry[filter_idx].handle, |
| 3354 | true); |
| 3355 | |
| 3356 | spin_lock_bh(&efx->filter_lock); |
| 3357 | if (rc == 0) |
| 3358 | *spec = new_spec; |
| 3359 | } else { |
| 3360 | /* Really remove the filter */ |
| 3361 | |
| 3362 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 3363 | efx_ef10_filter_is_exclusive(spec) ? |
| 3364 | MC_CMD_FILTER_OP_IN_OP_REMOVE : |
| 3365 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); |
| 3366 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 3367 | table->entry[filter_idx].handle); |
| 3368 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, |
| 3369 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 3370 | |
| 3371 | spin_lock_bh(&efx->filter_lock); |
| 3372 | if (rc == 0) { |
| 3373 | kfree(spec); |
| 3374 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); |
| 3375 | } |
| 3376 | } |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 3377 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3378 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; |
| 3379 | wake_up_all(&table->waitq); |
| 3380 | out_unlock: |
| 3381 | spin_unlock_bh(&efx->filter_lock); |
| 3382 | finish_wait(&table->waitq, &wait); |
| 3383 | return rc; |
| 3384 | } |
| 3385 | |
| 3386 | static int efx_ef10_filter_remove_safe(struct efx_nic *efx, |
| 3387 | enum efx_filter_priority priority, |
| 3388 | u32 filter_id) |
| 3389 | { |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 3390 | return efx_ef10_filter_remove_internal(efx, 1U << priority, |
| 3391 | filter_id, false); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3392 | } |
| 3393 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3394 | static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) |
| 3395 | { |
| 3396 | return filter_id % HUNT_FILTER_TBL_ROWS; |
| 3397 | } |
| 3398 | |
| 3399 | static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, |
| 3400 | enum efx_filter_priority priority, |
| 3401 | u32 filter_id) |
| 3402 | { |
| 3403 | return efx_ef10_filter_remove_internal(efx, 1U << priority, |
| 3404 | filter_id, true); |
| 3405 | } |
| 3406 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3407 | static int efx_ef10_filter_get_safe(struct efx_nic *efx, |
| 3408 | enum efx_filter_priority priority, |
| 3409 | u32 filter_id, struct efx_filter_spec *spec) |
| 3410 | { |
| 3411 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; |
| 3412 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3413 | const struct efx_filter_spec *saved_spec; |
| 3414 | int rc; |
| 3415 | |
| 3416 | spin_lock_bh(&efx->filter_lock); |
| 3417 | saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 3418 | if (saved_spec && saved_spec->priority == priority && |
| 3419 | efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == |
| 3420 | filter_id / HUNT_FILTER_TBL_ROWS) { |
| 3421 | *spec = *saved_spec; |
| 3422 | rc = 0; |
| 3423 | } else { |
| 3424 | rc = -ENOENT; |
| 3425 | } |
| 3426 | spin_unlock_bh(&efx->filter_lock); |
| 3427 | return rc; |
| 3428 | } |
| 3429 | |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 3430 | static int efx_ef10_filter_clear_rx(struct efx_nic *efx, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3431 | enum efx_filter_priority priority) |
| 3432 | { |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 3433 | unsigned int priority_mask; |
| 3434 | unsigned int i; |
| 3435 | int rc; |
| 3436 | |
| 3437 | priority_mask = (((1U << (priority + 1)) - 1) & |
| 3438 | ~(1U << EFX_FILTER_PRI_AUTO)); |
| 3439 | |
| 3440 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { |
| 3441 | rc = efx_ef10_filter_remove_internal(efx, priority_mask, |
| 3442 | i, true); |
| 3443 | if (rc && rc != -ENOENT) |
| 3444 | return rc; |
| 3445 | } |
| 3446 | |
| 3447 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3448 | } |
| 3449 | |
| 3450 | static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, |
| 3451 | enum efx_filter_priority priority) |
| 3452 | { |
| 3453 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3454 | unsigned int filter_idx; |
| 3455 | s32 count = 0; |
| 3456 | |
| 3457 | spin_lock_bh(&efx->filter_lock); |
| 3458 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 3459 | if (table->entry[filter_idx].spec && |
| 3460 | efx_ef10_filter_entry_spec(table, filter_idx)->priority == |
| 3461 | priority) |
| 3462 | ++count; |
| 3463 | } |
| 3464 | spin_unlock_bh(&efx->filter_lock); |
| 3465 | return count; |
| 3466 | } |
| 3467 | |
| 3468 | static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) |
| 3469 | { |
| 3470 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3471 | |
| 3472 | return table->rx_match_count * HUNT_FILTER_TBL_ROWS; |
| 3473 | } |
| 3474 | |
| 3475 | static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, |
| 3476 | enum efx_filter_priority priority, |
| 3477 | u32 *buf, u32 size) |
| 3478 | { |
| 3479 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3480 | struct efx_filter_spec *spec; |
| 3481 | unsigned int filter_idx; |
| 3482 | s32 count = 0; |
| 3483 | |
| 3484 | spin_lock_bh(&efx->filter_lock); |
| 3485 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 3486 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 3487 | if (spec && spec->priority == priority) { |
| 3488 | if (count == size) { |
| 3489 | count = -EMSGSIZE; |
| 3490 | break; |
| 3491 | } |
| 3492 | buf[count++] = (efx_ef10_filter_rx_match_pri( |
| 3493 | table, spec->match_flags) * |
| 3494 | HUNT_FILTER_TBL_ROWS + |
| 3495 | filter_idx); |
| 3496 | } |
| 3497 | } |
| 3498 | spin_unlock_bh(&efx->filter_lock); |
| 3499 | return count; |
| 3500 | } |
| 3501 | |
| 3502 | #ifdef CONFIG_RFS_ACCEL |
| 3503 | |
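| | /* The ARFS path runs in atomic context, so filters are pushed with |
| |  * efx_mcdi_rpc_async() and the software table entry is finalised in |
| |  * the completion callback. The callback only receives a 64-bit |
| |  * cookie, so the 'replacing' flag, table index and destination queue |
| |  * are packed into it. |
| |  */ |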
| 3504 | static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; |
| 3505 | |
| 3506 | static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, |
| 3507 | struct efx_filter_spec *spec) |
| 3508 | { |
| 3509 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3510 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); |
| 3511 | struct efx_filter_spec *saved_spec; |
| 3512 | unsigned int hash, i, depth = 1; |
| 3513 | bool replacing = false; |
| 3514 | int ins_index = -1; |
| 3515 | u64 cookie; |
| 3516 | s32 rc; |
| 3517 | |
| 3518 | /* Must be an RX filter without RSS and not for a multicast |
| 3519 | * destination address (RFS only works for connected sockets). |
| 3520 | * These restrictions allow us to pass only a tiny amount of |
| 3521 | * data through to the completion function. |
| 3522 | */ |
| 3523 | EFX_WARN_ON_PARANOID(spec->flags != |
| 3524 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); |
| 3525 | EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); |
| 3526 | EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); |
| 3527 | |
| 3528 | hash = efx_ef10_filter_hash(spec); |
| 3529 | |
| 3530 | spin_lock_bh(&efx->filter_lock); |
| 3531 | |
| 3532 | /* Find any existing filter with the same match tuple or else |
| 3533 | * a free slot to insert at. If an existing filter is busy, |
| 3534 | * we have to give up. |
| 3535 | */ |
| 3536 | for (;;) { |
| 3537 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 3538 | saved_spec = efx_ef10_filter_entry_spec(table, i); |
| 3539 | |
| 3540 | if (!saved_spec) { |
| 3541 | if (ins_index < 0) |
| 3542 | ins_index = i; |
| 3543 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { |
| 3544 | if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { |
| 3545 | rc = -EBUSY; |
| 3546 | goto fail_unlock; |
| 3547 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3548 | if (spec->priority < saved_spec->priority) { |
| 3549 | rc = -EPERM; |
| 3550 | goto fail_unlock; |
| 3551 | } |
| 3552 | ins_index = i; |
| 3553 | break; |
| 3554 | } |
| 3555 | |
| 3556 | /* Once we reach the maximum search depth, use the |
| 3557 | * first suitable slot or return -EBUSY if there was |
| 3558 | * none |
| 3559 | */ |
| 3560 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { |
| 3561 | if (ins_index < 0) { |
| 3562 | rc = -EBUSY; |
| 3563 | goto fail_unlock; |
| 3564 | } |
| 3565 | break; |
| 3566 | } |
| 3567 | |
| 3568 | ++depth; |
| 3569 | } |
| 3570 | |
| 3571 | /* Create a software table entry if necessary, and mark it |
| 3572 | * busy. We might yet fail to insert, but any attempt to |
| 3573 | * insert a conflicting filter while we're waiting for the |
| 3574 | * firmware must find the busy entry. |
| 3575 | */ |
| 3576 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); |
| 3577 | if (saved_spec) { |
| 3578 | replacing = true; |
| 3579 | } else { |
| 3580 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); |
| 3581 | if (!saved_spec) { |
| 3582 | rc = -ENOMEM; |
| 3583 | goto fail_unlock; |
| 3584 | } |
| 3585 | *saved_spec = *spec; |
| 3586 | } |
| 3587 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, |
| 3588 | EFX_EF10_FILTER_FLAG_BUSY); |
| 3589 | |
| 3590 | spin_unlock_bh(&efx->filter_lock); |
| 3591 | |
| 3592 | /* Pack up the variables needed on completion */ |
| 3593 | cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; |
| 3594 | |
| 3595 | efx_ef10_filter_push_prep(efx, spec, inbuf, |
| 3596 | table->entry[ins_index].handle, replacing); |
| 3597 | efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), |
| 3598 | MC_CMD_FILTER_OP_OUT_LEN, |
| 3599 | efx_ef10_filter_rfs_insert_complete, cookie); |
| 3600 | |
| 3601 | return ins_index; |
| 3602 | |
| 3603 | fail_unlock: |
| 3604 | spin_unlock_bh(&efx->filter_lock); |
| 3605 | return rc; |
| 3606 | } |
| 3607 | |
| 3608 | static void |
| 3609 | efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, |
| 3610 | int rc, efx_dword_t *outbuf, |
| 3611 | size_t outlen_actual) |
| 3612 | { |
| 3613 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3614 | unsigned int ins_index, dmaq_id; |
| 3615 | struct efx_filter_spec *spec; |
| 3616 | bool replacing; |
| 3617 | |
| 3618 | /* Unpack the cookie */ |
| 3619 | replacing = cookie >> 31; |
| 3620 | ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); |
| 3621 | dmaq_id = cookie & 0xffff; |
| 3622 | |
| 3623 | spin_lock_bh(&efx->filter_lock); |
| 3624 | spec = efx_ef10_filter_entry_spec(table, ins_index); |
| 3625 | if (rc == 0) { |
| 3626 | table->entry[ins_index].handle = |
| 3627 | MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); |
| 3628 | if (replacing) |
| 3629 | spec->dmaq_id = dmaq_id; |
| 3630 | } else if (!replacing) { |
| 3631 | kfree(spec); |
| 3632 | spec = NULL; |
| 3633 | } |
| 3634 | efx_ef10_filter_set_entry(table, ins_index, spec, 0); |
| 3635 | spin_unlock_bh(&efx->filter_lock); |
| 3636 | |
| 3637 | wake_up_all(&table->waitq); |
| 3638 | } |
| 3639 | |
| 3640 | static void |
| 3641 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, |
| 3642 | unsigned long filter_idx, |
| 3643 | int rc, efx_dword_t *outbuf, |
| 3644 | size_t outlen_actual); |
| 3645 | |
| 3646 | static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, |
| 3647 | unsigned int filter_idx) |
| 3648 | { |
| 3649 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3650 | struct efx_filter_spec *spec = |
| 3651 | efx_ef10_filter_entry_spec(table, filter_idx); |
| 3652 | MCDI_DECLARE_BUF(inbuf, |
| 3653 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + |
| 3654 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); |
| 3655 | |
| 3656 | if (!spec || |
| 3657 | (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || |
| 3658 | spec->priority != EFX_FILTER_PRI_HINT || |
| 3659 | !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, |
| 3660 | flow_id, filter_idx)) |
| 3661 | return false; |
| 3662 | |
| 3663 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 3664 | MC_CMD_FILTER_OP_IN_OP_REMOVE); |
| 3665 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 3666 | table->entry[filter_idx].handle); |
| 3667 | if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, |
| 3668 | efx_ef10_filter_rfs_expire_complete, filter_idx)) |
| 3669 | return false; |
| 3670 | |
| 3671 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; |
| 3672 | return true; |
| 3673 | } |
| 3674 | |
| 3675 | static void |
| 3676 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, |
| 3677 | unsigned long filter_idx, |
| 3678 | int rc, efx_dword_t *outbuf, |
| 3679 | size_t outlen_actual) |
| 3680 | { |
| 3681 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3682 | struct efx_filter_spec *spec = |
| 3683 | efx_ef10_filter_entry_spec(table, filter_idx); |
| 3684 | |
| 3685 | spin_lock_bh(&efx->filter_lock); |
| 3686 | if (rc == 0) { |
| 3687 | kfree(spec); |
| 3688 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); |
| 3689 | } |
| 3690 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; |
| 3691 | wake_up_all(&table->waitq); |
| 3692 | spin_unlock_bh(&efx->filter_lock); |
| 3693 | } |
| 3694 | |
| 3695 | #endif /* CONFIG_RFS_ACCEL */ |
| 3696 | |
| 3697 | static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) |
| 3698 | { |
| 3699 | int match_flags = 0; |
| 3700 | |
| 3701 | #define MAP_FLAG(gen_flag, mcdi_field) { \ |
| 3702 | u32 old_mcdi_flags = mcdi_flags; \ |
| 3703 | mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ |
| 3704 | mcdi_field ## _LBN); \ |
| 3705 | if (mcdi_flags != old_mcdi_flags) \ |
| 3706 | match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ |
| 3707 | } |
| 3708 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); |
| 3709 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); |
| 3710 | MAP_FLAG(REM_HOST, SRC_IP); |
| 3711 | MAP_FLAG(LOC_HOST, DST_IP); |
| 3712 | MAP_FLAG(REM_MAC, SRC_MAC); |
| 3713 | MAP_FLAG(REM_PORT, SRC_PORT); |
| 3714 | MAP_FLAG(LOC_MAC, DST_MAC); |
| 3715 | MAP_FLAG(LOC_PORT, DST_PORT); |
| 3716 | MAP_FLAG(ETHER_TYPE, ETHER_TYPE); |
| 3717 | MAP_FLAG(INNER_VID, INNER_VLAN); |
| 3718 | MAP_FLAG(OUTER_VID, OUTER_VLAN); |
| 3719 | MAP_FLAG(IP_PROTO, IP_PROTO); |
| 3720 | #undef MAP_FLAG |
| 3721 | |
| 3722 | /* Did we map them all? */ |
| 3723 | if (mcdi_flags) |
| 3724 | return -EINVAL; |
| 3725 | |
| 3726 | return match_flags; |
| 3727 | } |
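|      | /* Worked example (editorial): an mcdi_flags value with only the SRC_IP |
|      |  * and DST_IP match bits set is translated by the MAP_FLAG() sequence |
|      |  * above into EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST, |
|      |  * while any remaining bit that no MAP_FLAG() entry clears makes the |
|      |  * whole conversion fail with -EINVAL. |
|      |  */ |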
| 3728 | |
| 3729 | static int efx_ef10_filter_table_probe(struct efx_nic *efx) |
| 3730 | { |
| 3731 | MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); |
| 3732 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); |
| 3733 | unsigned int pd_match_pri, pd_match_count; |
| 3734 | struct efx_ef10_filter_table *table; |
| 3735 | size_t outlen; |
| 3736 | int rc; |
| 3737 | |
Edward Cree | dd98708 | 2016-06-15 17:43:43 +0100 | [diff] [blame^] | 3738 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 3739 | return -EINVAL; |
| 3740 | |
| 3741 | if (efx->filter_state) /* already probed */ |
| 3742 | return 0; |
| 3743 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3744 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 3745 | if (!table) |
| 3746 | return -ENOMEM; |
| 3747 | |
| 3748 | /* Find out which RX filter types are supported, and their priorities */ |
| 3749 | MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, |
| 3750 | MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); |
| 3751 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, |
| 3752 | inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), |
| 3753 | &outlen); |
| 3754 | if (rc) |
| 3755 | goto fail; |
| 3756 | pd_match_count = MCDI_VAR_ARRAY_LEN( |
| 3757 | outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); |
| 3758 | table->rx_match_count = 0; |
| 3759 | |
| 3760 | for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { |
| 3761 | u32 mcdi_flags = |
| 3762 | MCDI_ARRAY_DWORD( |
| 3763 | outbuf, |
| 3764 | GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, |
| 3765 | pd_match_pri); |
| 3766 | rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags); |
| 3767 | if (rc < 0) { |
| 3768 | netif_dbg(efx, probe, efx->net_dev, |
| 3769 | "%s: fw flags %#x pri %u not supported in driver\n", |
| 3770 | __func__, mcdi_flags, pd_match_pri); |
| 3771 | } else { |
| 3772 | netif_dbg(efx, probe, efx->net_dev, |
| 3773 | "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", |
| 3774 | __func__, mcdi_flags, pd_match_pri, |
| 3775 | rc, table->rx_match_count); |
| 3776 | table->rx_match_flags[table->rx_match_count++] = rc; |
| 3777 | } |
| 3778 | } |
| 3779 | |
| 3780 | table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); |
| 3781 | if (!table->entry) { |
| 3782 | rc = -ENOMEM; |
| 3783 | goto fail; |
| 3784 | } |
| 3785 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3786 | table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; |
| 3787 | table->bcast_id = EFX_EF10_FILTER_ID_INVALID; |
| 3788 | table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; |
Andrew Rybchenko | b071c3a | 2016-06-15 17:43:00 +0100 | [diff] [blame] | 3789 | table->mc_promisc_last = false; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3790 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3791 | efx->filter_state = table; |
| 3792 | init_waitqueue_head(&table->waitq); |
| 3793 | return 0; |
| 3794 | |
| 3795 | fail: |
| 3796 | kfree(table); |
| 3797 | return rc; |
| 3798 | } |
| 3799 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 3800 | /* Caller must hold efx->filter_sem for read if race against |
| 3801 | * efx_ef10_filter_table_remove() is possible |
| 3802 | */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3803 | static void efx_ef10_filter_table_restore(struct efx_nic *efx) |
| 3804 | { |
| 3805 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3806 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 3807 | struct efx_filter_spec *spec; |
| 3808 | unsigned int filter_idx; |
| 3809 | bool failed = false; |
| 3810 | int rc; |
| 3811 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 3812 | WARN_ON(!rwsem_is_locked(&efx->filter_sem)); |
| 3813 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3814 | if (!nic_data->must_restore_filters) |
| 3815 | return; |
| 3816 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 3817 | if (!table) |
| 3818 | return; |
| 3819 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3820 | spin_lock_bh(&efx->filter_lock); |
| 3821 | |
| 3822 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 3823 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 3824 | if (!spec) |
| 3825 | continue; |
| 3826 | |
| 3827 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; |
| 3828 | spin_unlock_bh(&efx->filter_lock); |
| 3829 | |
| 3830 | rc = efx_ef10_filter_push(efx, spec, |
| 3831 | &table->entry[filter_idx].handle, |
| 3832 | false); |
| 3833 | if (rc) |
| 3834 | failed = true; |
| 3835 | |
| 3836 | spin_lock_bh(&efx->filter_lock); |
| 3837 | if (rc) { |
| 3838 | kfree(spec); |
| 3839 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); |
| 3840 | } else { |
| 3841 | table->entry[filter_idx].spec &= |
| 3842 | ~EFX_EF10_FILTER_FLAG_BUSY; |
| 3843 | } |
| 3844 | } |
| 3845 | |
| 3846 | spin_unlock_bh(&efx->filter_lock); |
| 3847 | |
| 3848 | if (failed) |
| 3849 | netif_err(efx, hw, efx->net_dev, |
| 3850 | "unable to restore all filters\n"); |
| 3851 | else |
| 3852 | nic_data->must_restore_filters = false; |
| 3853 | } |
| 3854 | |
| 3855 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) |
| 3856 | { |
| 3857 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3858 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); |
| 3859 | struct efx_filter_spec *spec; |
| 3860 | unsigned int filter_idx; |
| 3861 | int rc; |
| 3862 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 3863 | efx->filter_state = NULL; |
Edward Cree | dd98708 | 2016-06-15 17:43:43 +0100 | [diff] [blame^] | 3864 | /* If we were called without locking, then it's not safe to free |
| 3865 | * the table as others might be using it. So we just WARN, leak |
| 3866 | * the memory, and potentially get an inconsistent filter table |
| 3867 | * state. |
| 3868 | * This should never actually happen. |
| 3869 | */ |
| 3870 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 3871 | return; |
| 3872 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 3873 | if (!table) |
| 3874 | return; |
| 3875 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3876 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 3877 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 3878 | if (!spec) |
| 3879 | continue; |
| 3880 | |
| 3881 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 3882 | efx_ef10_filter_is_exclusive(spec) ? |
| 3883 | MC_CMD_FILTER_OP_IN_OP_REMOVE : |
| 3884 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); |
| 3885 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 3886 | table->entry[filter_idx].handle); |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 3887 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, |
| 3888 | sizeof(inbuf), NULL, 0, NULL); |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 3889 | if (rc) |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 3890 | netif_info(efx, drv, efx->net_dev, |
| 3891 | "%s: filter %04x remove failed\n", |
| 3892 | __func__, filter_idx); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3893 | kfree(spec); |
| 3894 | } |
| 3895 | |
| 3896 | vfree(table->entry); |
| 3897 | kfree(table); |
| 3898 | } |
| 3899 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3900 | #define EFX_EF10_FILTER_DO_MARK_OLD(id) \ |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 3901 | if (id != EFX_EF10_FILTER_ID_INVALID) { \ |
| 3902 | filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ |
| 3903 | if (!table->entry[filter_idx].spec) \ |
| 3904 | netif_dbg(efx, drv, efx->net_dev, \ |
| 3905 | "%s: marked null spec old %04x:%04x\n", \ |
| 3906 | __func__, id, filter_idx); \ |
| 3907 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\ |
| 3908 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3909 | static void efx_ef10_filter_mark_old(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3910 | { |
| 3911 | struct efx_ef10_filter_table *table = efx->filter_state; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3912 | unsigned int filter_idx, i; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3913 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 3914 | if (!table) |
| 3915 | return; |
| 3916 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3917 | /* Mark old filters that may need to be removed */ |
| 3918 | spin_lock_bh(&efx->filter_lock); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3919 | for (i = 0; i < table->dev_uc_count; i++) |
| 3920 | EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); |
| 3921 | for (i = 0; i < table->dev_mc_count; i++) |
| 3922 | EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); |
| 3923 | EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); |
| 3924 | EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); |
| 3925 | EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3926 | spin_unlock_bh(&efx->filter_lock); |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3927 | } |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3928 | #undef EFX_EF10_FILTER_DO_MARK_OLD |
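|      | /* Editorial summary: together with efx_ef10_filter_remove_old() further |
|      |  * down, this implements a mark-and-sweep of the address filters. |
|      |  * efx_ef10_filter_sync_rx_mode() first marks every currently installed |
|      |  * address filter AUTO_OLD, then re-inserts the new address lists (the |
|      |  * insert path, not shown here, is expected to renew surviving entries |
|      |  * and clear the mark), and finally sweeps away whatever is still |
|      |  * marked. |
|      |  */ |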
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3929 | |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3930 | static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) |
| 3931 | { |
| 3932 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3933 | struct net_device *net_dev = efx->net_dev; |
| 3934 | struct netdev_hw_addr *uc; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3935 | int addr_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3936 | unsigned int i; |
| 3937 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3938 | table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; |
| 3939 | addr_count = netdev_uc_count(net_dev); |
| 3940 | if (net_dev->flags & IFF_PROMISC) |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3941 | *promisc = true; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3942 | table->dev_uc_count = 1 + addr_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3943 | ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); |
| 3944 | i = 1; |
| 3945 | netdev_for_each_uc_addr(uc, net_dev) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3946 | if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { |
| 3947 | *promisc = true; |
| 3948 | break; |
| 3949 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3950 | ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3951 | table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3952 | i++; |
| 3953 | } |
| 3954 | } |
| 3955 | |
| 3956 | static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) |
| 3957 | { |
| 3958 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3959 | struct net_device *net_dev = efx->net_dev; |
| 3960 | struct netdev_hw_addr *mc; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 3961 | unsigned int i, addr_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3962 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3963 | table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; |
| 3964 | table->bcast_id = EFX_EF10_FILTER_ID_INVALID; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 3965 | if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3966 | *promisc = true; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3967 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3968 | addr_count = netdev_mc_count(net_dev); |
| 3969 | i = 0; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 3970 | netdev_for_each_mc_addr(mc, net_dev) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3971 | if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { |
| 3972 | *promisc = true; |
| 3973 | break; |
| 3974 | } |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 3975 | ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3976 | table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 3977 | i++; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3978 | } |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3979 | |
| 3980 | table->dev_mc_count = i; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3981 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3982 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3983 | static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, |
| 3984 | bool multicast, bool rollback) |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3985 | { |
| 3986 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3987 | struct efx_ef10_dev_addr *addr_list; |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 3988 | enum efx_filter_flags filter_flags; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3989 | struct efx_filter_spec spec; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3990 | u8 baddr[ETH_ALEN]; |
| 3991 | unsigned int i, j; |
| 3992 | int addr_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3993 | int rc; |
| 3994 | |
| 3995 | if (multicast) { |
| 3996 | addr_list = table->dev_mc_list; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 3997 | addr_count = table->dev_mc_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 3998 | } else { |
| 3999 | addr_list = table->dev_uc_list; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4000 | addr_count = table->dev_uc_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4001 | } |
| 4002 | |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 4003 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; |
| 4004 | |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4005 | /* Insert/renew filters */ |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4006 | for (i = 0; i < addr_count; i++) { |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 4007 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
Jon Cooper | b6f568e | 2015-07-21 15:10:15 +0100 | [diff] [blame] | 4008 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4009 | addr_list[i].addr); |
Jon Cooper | b6f568e | 2015-07-21 15:10:15 +0100 | [diff] [blame] | 4010 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| 4011 | if (rc < 0) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4012 | if (rollback) { |
| 4013 | netif_info(efx, drv, efx->net_dev, |
| 4014 | "efx_ef10_filter_insert failed rc=%d\n", |
| 4015 | rc); |
| 4016 | /* Fall back to promiscuous */ |
| 4017 | for (j = 0; j < i; j++) { |
| 4018 | if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) |
| 4019 | continue; |
| 4020 | efx_ef10_filter_remove_unsafe( |
| 4021 | efx, EFX_FILTER_PRI_AUTO, |
| 4022 | addr_list[j].id); |
| 4023 | addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; |
| 4024 | } |
| 4025 | return rc; |
| 4026 | } else { |
| 4027 | /* mark as not inserted, and carry on */ |
| 4028 | rc = EFX_EF10_FILTER_ID_INVALID; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4029 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4030 | } |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4031 | addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4032 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4033 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4034 | if (multicast && rollback) { |
| 4035 | /* Also need an Ethernet broadcast filter */ |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 4036 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4037 | eth_broadcast_addr(baddr); |
| 4038 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4039 | rc = efx_ef10_filter_insert(efx, &spec, true); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4040 | if (rc < 0) { |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4041 | netif_warn(efx, drv, efx->net_dev, |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4042 | "Broadcast filter insert failed rc=%d\n", rc); |
| 4043 | /* Fall back to promiscuous */ |
| 4044 | for (j = 0; j < i; j++) { |
| 4045 | if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) |
| 4046 | continue; |
| 4047 | efx_ef10_filter_remove_unsafe( |
| 4048 | efx, EFX_FILTER_PRI_AUTO, |
| 4049 | addr_list[j].id); |
| 4050 | addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; |
| 4051 | } |
| 4052 | return rc; |
| 4053 | } else { |
| 4054 | table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); |
| 4055 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4056 | } |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4057 | |
| 4058 | return 0; |
| 4059 | } |
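|      | /* Editorial note on the rollback flag above: with rollback=true a single |
|      |  * failed insertion removes everything already inserted and returns the |
|      |  * error, so the caller can fall back to a promiscuous/default filter; |
|      |  * with rollback=false a failed entry is simply marked as not inserted |
|      |  * and the loop carries on. |
|      |  */ |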
| 4060 | |
| 4061 | static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, |
| 4062 | bool rollback) |
| 4063 | { |
| 4064 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4065 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 4066 | enum efx_filter_flags filter_flags; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4067 | struct efx_filter_spec spec; |
| 4068 | u8 baddr[ETH_ALEN]; |
| 4069 | int rc; |
| 4070 | |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 4071 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; |
| 4072 | |
| 4073 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4074 | |
| 4075 | if (multicast) |
| 4076 | efx_filter_set_mc_def(&spec); |
| 4077 | else |
| 4078 | efx_filter_set_uc_def(&spec); |
| 4079 | |
| 4080 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| 4081 | if (rc < 0) { |
Bert Kenward | 09a0420 | 2015-12-23 08:58:15 +0000 | [diff] [blame] | 4082 | netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, |
| 4083 | efx->net_dev, |
| 4084 | "%scast mismatch filter insert failed rc=%d\n", |
| 4085 | multicast ? "Multi" : "Uni", rc); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4086 | } else if (multicast) { |
| 4087 | table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); |
| 4088 | if (!nic_data->workaround_26807) { |
| 4089 | /* Also need an Ethernet broadcast filter */ |
| 4090 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 4091 | filter_flags, 0); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4092 | eth_broadcast_addr(baddr); |
| 4093 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, |
| 4094 | baddr); |
| 4095 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| 4096 | if (rc < 0) { |
| 4097 | netif_warn(efx, drv, efx->net_dev, |
| 4098 | "Broadcast filter insert failed rc=%d\n", |
| 4099 | rc); |
| 4100 | if (rollback) { |
| 4101 | /* Roll back the mc_def filter */ |
| 4102 | efx_ef10_filter_remove_unsafe( |
| 4103 | efx, EFX_FILTER_PRI_AUTO, |
| 4104 | table->mcdef_id); |
| 4105 | table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; |
| 4106 | return rc; |
| 4107 | } |
| 4108 | } else { |
| 4109 | table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); |
| 4110 | } |
| 4111 | } |
| 4112 | rc = 0; |
| 4113 | } else { |
| 4114 | table->ucdef_id = rc; |
| 4115 | rc = 0; |
| 4116 | } |
| 4117 | return rc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4118 | } |
| 4119 | |
| 4120 | /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD |
| 4121 | * flag or removes these filters, we don't need to hold the filter_lock while |
| 4122 | * scanning for these filters. |
| 4123 | */ |
| 4124 | static void efx_ef10_filter_remove_old(struct efx_nic *efx) |
| 4125 | { |
| 4126 | struct efx_ef10_filter_table *table = efx->filter_state; |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 4127 | int remove_failed = 0; |
| 4128 | int remove_noent = 0; |
| 4129 | int rc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4130 | int i; |
| 4131 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4132 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { |
| 4133 | if (ACCESS_ONCE(table->entry[i].spec) & |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 4134 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 4135 | rc = efx_ef10_filter_remove_internal(efx, |
| 4136 | 1U << EFX_FILTER_PRI_AUTO, i, true); |
| 4137 | if (rc == -ENOENT) |
| 4138 | remove_noent++; |
| 4139 | else if (rc) |
| 4140 | remove_failed++; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4141 | } |
| 4142 | } |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 4143 | |
| 4144 | if (remove_failed) |
| 4145 | netif_info(efx, drv, efx->net_dev, |
| 4146 | "%s: failed to remove %d filters\n", |
| 4147 | __func__, remove_failed); |
| 4148 | if (remove_noent) |
| 4149 | netif_info(efx, drv, efx->net_dev, |
| 4150 | "%s: failed to remove %d non-existent filters\n", |
| 4151 | __func__, remove_noent); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4152 | } |
| 4153 | |
Daniel Pieczko | 7a186f4 | 2015-07-07 11:37:19 +0100 | [diff] [blame] | 4154 | static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) |
| 4155 | { |
| 4156 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 4157 | u8 mac_old[ETH_ALEN]; |
| 4158 | int rc, rc2; |
| 4159 | |
| 4160 | /* Only reconfigure a PF-created vport */ |
| 4161 | if (is_zero_ether_addr(nic_data->vport_mac)) |
| 4162 | return 0; |
| 4163 | |
| 4164 | efx_device_detach_sync(efx); |
| 4165 | efx_net_stop(efx->net_dev); |
| 4166 | down_write(&efx->filter_sem); |
| 4167 | efx_ef10_filter_table_remove(efx); |
| 4168 | up_write(&efx->filter_sem); |
| 4169 | |
| 4170 | rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); |
| 4171 | if (rc) |
| 4172 | goto restore_filters; |
| 4173 | |
| 4174 | ether_addr_copy(mac_old, nic_data->vport_mac); |
| 4175 | rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, |
| 4176 | nic_data->vport_mac); |
| 4177 | if (rc) |
| 4178 | goto restore_vadaptor; |
| 4179 | |
| 4180 | rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, |
| 4181 | efx->net_dev->dev_addr); |
| 4182 | if (!rc) { |
| 4183 | ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); |
| 4184 | } else { |
| 4185 | rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); |
| 4186 | if (rc2) { |
| 4187 | /* Failed to add original MAC, so clear vport_mac */ |
| 4188 | eth_zero_addr(nic_data->vport_mac); |
| 4189 | goto reset_nic; |
| 4190 | } |
| 4191 | } |
| 4192 | |
| 4193 | restore_vadaptor: |
| 4194 | rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); |
| 4195 | if (rc2) |
| 4196 | goto reset_nic; |
| 4197 | restore_filters: |
| 4198 | down_write(&efx->filter_sem); |
| 4199 | rc2 = efx_ef10_filter_table_probe(efx); |
| 4200 | up_write(&efx->filter_sem); |
| 4201 | if (rc2) |
| 4202 | goto reset_nic; |
| 4203 | |
| 4204 | rc2 = efx_net_open(efx->net_dev); |
| 4205 | if (rc2) |
| 4206 | goto reset_nic; |
| 4207 | |
| 4208 | netif_device_attach(efx->net_dev); |
| 4209 | |
| 4210 | return rc; |
| 4211 | |
| 4212 | reset_nic: |
| 4213 | netif_err(efx, drv, efx->net_dev, |
| 4214 | "Failed to restore when changing MAC address - scheduling reset\n"); |
| 4215 | efx_schedule_reset(efx, RESET_TYPE_DATAPATH); |
| 4216 | |
| 4217 | return rc ? rc : rc2; |
| 4218 | } |
| 4219 | |
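|      | /* Editorial overview of efx_ef10_filter_sync_rx_mode() below (a hedged |
|      |  * summary of the code, not a specification): the unicast and multicast |
|      |  * address lists are re-installed on every call; the unicast/multicast |
|      |  * default (mismatch) filters are added alongside the lists when the |
|      |  * interface is promiscuous/allmulti or a list overflowed, and as a |
|      |  * fallback when inserting the individual filters fails.  With |
|      |  * workaround_26807 (cascaded multicast filters) old filters are removed |
|      |  * before a promiscuous-state change so packets are dropped rather than |
|      |  * duplicated. |
|      |  */ |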
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4220 | /* Caller must hold efx->filter_sem for read if race against |
| 4221 | * efx_ef10_filter_table_remove() is possible |
| 4222 | */ |
| 4223 | static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) |
| 4224 | { |
| 4225 | struct efx_ef10_filter_table *table = efx->filter_state; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 4226 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4227 | struct net_device *net_dev = efx->net_dev; |
| 4228 | bool uc_promisc = false, mc_promisc = false; |
| 4229 | |
| 4230 | if (!efx_dev_registered(efx)) |
| 4231 | return; |
| 4232 | |
| 4233 | if (!table) |
| 4234 | return; |
| 4235 | |
| 4236 | efx_ef10_filter_mark_old(efx); |
| 4237 | |
| 4238 | /* Copy/convert the address lists; add the primary station |
| 4239 | * address and broadcast address |
| 4240 | */ |
| 4241 | netif_addr_lock_bh(net_dev); |
| 4242 | efx_ef10_filter_uc_addr_list(efx, &uc_promisc); |
| 4243 | efx_ef10_filter_mc_addr_list(efx, &mc_promisc); |
| 4244 | netif_addr_unlock_bh(net_dev); |
| 4245 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4246 | /* Insert/renew unicast filters */ |
| 4247 | if (uc_promisc) { |
| 4248 | efx_ef10_filter_insert_def(efx, false, false); |
| 4249 | efx_ef10_filter_insert_addr_list(efx, false, false); |
| 4250 | } else { |
| 4251 | /* If any of the filters failed to insert, fall back to |
| 4252 | * promiscuous mode - add in the uc_def filter. But keep |
| 4253 | * our individual unicast filters. |
| 4254 | */ |
| 4255 | if (efx_ef10_filter_insert_addr_list(efx, false, false)) |
| 4256 | efx_ef10_filter_insert_def(efx, false, false); |
| 4257 | } |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 4258 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4259 | /* Insert/renew multicast filters */ |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 4260 | /* If changing promiscuous state with cascaded multicast filters, remove |
| 4261 | * old filters first, so that packets are dropped rather than duplicated |
| 4262 | */ |
Andrew Rybchenko | b071c3a | 2016-06-15 17:43:00 +0100 | [diff] [blame] | 4263 | if (nic_data->workaround_26807 && table->mc_promisc_last != mc_promisc) |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 4264 | efx_ef10_filter_remove_old(efx); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4265 | if (mc_promisc) { |
| 4266 | if (nic_data->workaround_26807) { |
| 4267 | /* If we failed to insert promiscuous filters, roll back |
| 4268 | * and fall back to individual multicast filters |
| 4269 | */ |
| 4270 | if (efx_ef10_filter_insert_def(efx, true, true)) { |
| 4271 | /* Changing promisc state, so remove old filters */ |
| 4272 | efx_ef10_filter_remove_old(efx); |
| 4273 | efx_ef10_filter_insert_addr_list(efx, true, false); |
| 4274 | } |
| 4275 | } else { |
| 4276 | /* If we failed to insert promiscuous filters, don't |
| 4277 | * roll back. Regardless, also insert the mc_list |
| 4278 | */ |
| 4279 | efx_ef10_filter_insert_def(efx, true, false); |
| 4280 | efx_ef10_filter_insert_addr_list(efx, true, false); |
| 4281 | } |
| 4282 | } else { |
| 4283 | /* If any filters failed to insert, roll back and fall back to |
| 4284 | * promiscuous mode - mc_def filter and maybe broadcast. If |
| 4285 | * that fails, roll back again and insert as many of our |
| 4286 | * individual multicast filters as we can. |
| 4287 | */ |
| 4288 | if (efx_ef10_filter_insert_addr_list(efx, true, true)) { |
| 4289 | /* Changing promisc state, so remove old filters */ |
| 4290 | if (nic_data->workaround_26807) |
| 4291 | efx_ef10_filter_remove_old(efx); |
| 4292 | if (efx_ef10_filter_insert_def(efx, true, true)) |
| 4293 | efx_ef10_filter_insert_addr_list(efx, true, false); |
| 4294 | } |
| 4295 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4296 | |
| 4297 | efx_ef10_filter_remove_old(efx); |
Andrew Rybchenko | b071c3a | 2016-06-15 17:43:00 +0100 | [diff] [blame] | 4298 | table->mc_promisc_last = mc_promisc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 4299 | } |
| 4300 | |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4301 | static int efx_ef10_set_mac_address(struct efx_nic *efx) |
| 4302 | { |
| 4303 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); |
| 4304 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 4305 | bool was_enabled = efx->port_enabled; |
| 4306 | int rc; |
| 4307 | |
| 4308 | efx_device_detach_sync(efx); |
| 4309 | efx_net_stop(efx->net_dev); |
| 4310 | down_write(&efx->filter_sem); |
| 4311 | efx_ef10_filter_table_remove(efx); |
| 4312 | |
| 4313 | ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), |
| 4314 | efx->net_dev->dev_addr); |
| 4315 | MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, |
| 4316 | nic_data->vport_id); |
Daniel Pieczko | 535a617 | 2015-07-07 11:37:33 +0100 | [diff] [blame] | 4317 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, |
| 4318 | sizeof(inbuf), NULL, 0, NULL); |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4319 | |
| 4320 | efx_ef10_filter_table_probe(efx); |
| 4321 | up_write(&efx->filter_sem); |
| 4322 | if (was_enabled) |
| 4323 | efx_net_open(efx->net_dev); |
| 4324 | netif_device_attach(efx->net_dev); |
| 4325 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4326 | #ifdef CONFIG_SFC_SRIOV |
| 4327 | if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4328 | struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; |
| 4329 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4330 | if (rc == -EPERM) { |
| 4331 | struct efx_nic *efx_pf; |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4332 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4333 | /* Switch to PF and change MAC address on vport */ |
| 4334 | efx_pf = pci_get_drvdata(pci_dev_pf); |
| 4335 | |
| 4336 | rc = efx_ef10_sriov_set_vf_mac(efx_pf, |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4337 | nic_data->vf_index, |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4338 | efx->net_dev->dev_addr); |
| 4339 | } else if (!rc) { |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4340 | struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); |
| 4341 | struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; |
| 4342 | unsigned int i; |
| 4343 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4344 | /* MAC address successfully changed by VF (with MAC |
| 4345 | * spoofing) so update the parent PF if possible. |
| 4346 | */ |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4347 | for (i = 0; i < efx_pf->vf_count; ++i) { |
| 4348 | struct ef10_vf *vf = nic_data->vf + i; |
| 4349 | |
| 4350 | if (vf->efx == efx) { |
| 4351 | ether_addr_copy(vf->mac, |
| 4352 | efx->net_dev->dev_addr); |
| 4353 | return 0; |
| 4354 | } |
| 4355 | } |
| 4356 | } |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4357 | } else |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4358 | #endif |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4359 | if (rc == -EPERM) { |
| 4360 | netif_err(efx, drv, efx->net_dev, |
| 4361 | "Cannot change MAC address; use sfboot to enable" |
| 4362 | " mac-spoofing on this interface\n"); |
Daniel Pieczko | 7a186f4 | 2015-07-07 11:37:19 +0100 | [diff] [blame] | 4363 | } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { |
| 4364 | /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC |
| 4365 | * fall back to the method of changing the MAC address on the |
| 4366 | * vport. This only applies to PFs because such versions of |
| 4367 | * MCFW do not support VFs. |
| 4368 | */ |
| 4369 | rc = efx_ef10_vport_set_mac_address(efx); |
Daniel Pieczko | 535a617 | 2015-07-07 11:37:33 +0100 | [diff] [blame] | 4370 | } else { |
| 4371 | efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, |
| 4372 | sizeof(inbuf), NULL, 0, rc); |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 4373 | } |
| 4374 | |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4375 | return rc; |
| 4376 | } |
| 4377 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4378 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx) |
| 4379 | { |
| 4380 | efx_ef10_filter_sync_rx_mode(efx); |
| 4381 | |
| 4382 | return efx_mcdi_set_mac(efx); |
| 4383 | } |
| 4384 | |
Shradha Shah | 862f894 | 2015-05-20 11:08:56 +0100 | [diff] [blame] | 4385 | static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) |
| 4386 | { |
| 4387 | efx_ef10_filter_sync_rx_mode(efx); |
| 4388 | |
| 4389 | return 0; |
| 4390 | } |
| 4391 | |
Jon Cooper | 74cd60a | 2013-09-16 14:18:51 +0100 | [diff] [blame] | 4392 | static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) |
| 4393 | { |
| 4394 | MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); |
| 4395 | |
| 4396 | MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); |
| 4397 | return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), |
| 4398 | NULL, 0, NULL); |
| 4399 | } |
| 4400 | |
| 4401 | /* MC BISTs follow a different poll mechanism to phy BISTs. |
| 4402 | * The BIST is done in the poll handler on the MC, and the MCDI command |
| 4403 | * will block until the BIST is done. |
| 4404 | */ |
| 4405 | static int efx_ef10_poll_bist(struct efx_nic *efx) |
| 4406 | { |
| 4407 | int rc; |
| 4408 | MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); |
| 4409 | size_t outlen; |
| 4410 | u32 result; |
| 4411 | |
| 4412 | rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, |
| 4413 | outbuf, sizeof(outbuf), &outlen); |
| 4414 | if (rc != 0) |
| 4415 | return rc; |
| 4416 | |
| 4417 | if (outlen < MC_CMD_POLL_BIST_OUT_LEN) |
| 4418 | return -EIO; |
| 4419 | |
| 4420 | result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); |
| 4421 | switch (result) { |
| 4422 | case MC_CMD_POLL_BIST_PASSED: |
| 4423 | netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); |
| 4424 | return 0; |
| 4425 | case MC_CMD_POLL_BIST_TIMEOUT: |
| 4426 | netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); |
| 4427 | return -EIO; |
| 4428 | case MC_CMD_POLL_BIST_FAILED: |
| 4429 | netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); |
| 4430 | return -EIO; |
| 4431 | default: |
| 4432 | netif_err(efx, hw, efx->net_dev, |
| 4433 | "BIST returned unknown result %u", result); |
| 4434 | return -EIO; |
| 4435 | } |
| 4436 | } |
| 4437 | |
| 4438 | static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) |
| 4439 | { |
| 4440 | int rc; |
| 4441 | |
| 4442 | netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); |
| 4443 | |
| 4444 | rc = efx_ef10_start_bist(efx, bist_type); |
| 4445 | if (rc != 0) |
| 4446 | return rc; |
| 4447 | |
| 4448 | return efx_ef10_poll_bist(efx); |
| 4449 | } |
| 4450 | |
| 4451 | static int |
| 4452 | efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) |
| 4453 | { |
| 4454 | int rc, rc2; |
| 4455 | |
| 4456 | efx_reset_down(efx, RESET_TYPE_WORLD); |
| 4457 | |
| 4458 | rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, |
| 4459 | NULL, 0, NULL, 0, NULL); |
| 4460 | if (rc != 0) |
| 4461 | goto out; |
| 4462 | |
| 4463 | tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; |
| 4464 | tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; |
| 4465 | |
| 4466 | rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); |
| 4467 | |
| 4468 | out: |
Daniel Pieczko | 2732482 | 2015-07-31 11:14:54 +0100 | [diff] [blame] | 4469 | if (rc == -EPERM) |
| 4470 | rc = 0; |
Jon Cooper | 74cd60a | 2013-09-16 14:18:51 +0100 | [diff] [blame] | 4471 | rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); |
| 4472 | return rc ? rc : rc2; |
| 4473 | } |
| 4474 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4475 | #ifdef CONFIG_SFC_MTD |
| 4476 | |
| 4477 | struct efx_ef10_nvram_type_info { |
| 4478 | u16 type, type_mask; |
| 4479 | u8 port; |
| 4480 | const char *name; |
| 4481 | }; |
| 4482 | |
| 4483 | static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { |
| 4484 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, |
| 4485 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, |
| 4486 | { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, |
| 4487 | { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, |
| 4488 | { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, |
| 4489 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, |
| 4490 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, |
| 4491 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, |
| 4492 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, |
Ben Hutchings | a84f3bf9 | 2013-10-09 14:14:41 +0100 | [diff] [blame] | 4493 | { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4494 | { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, |
| 4495 | }; |
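|      | /* Editorial note: the PHY_MIN entry's type_mask of 0xff means that, in |
|      |  * efx_ef10_mtd_probe_partition() below, any partition type differing |
|      |  * from NVRAM_PARTITION_TYPE_PHY_MIN only in the low byte matches it, |
|      |  * because the lookup test is (type & ~info->type_mask) == info->type. |
|      |  */ |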
| 4496 | |
| 4497 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, |
| 4498 | struct efx_mcdi_mtd_partition *part, |
| 4499 | unsigned int type) |
| 4500 | { |
| 4501 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); |
| 4502 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); |
| 4503 | const struct efx_ef10_nvram_type_info *info; |
| 4504 | size_t size, erase_size, outlen; |
| 4505 | bool protected; |
| 4506 | int rc; |
| 4507 | |
| 4508 | for (info = efx_ef10_nvram_types; ; info++) { |
| 4509 | if (info == |
| 4510 | efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) |
| 4511 | return -ENODEV; |
| 4512 | if ((type & ~info->type_mask) == info->type) |
| 4513 | break; |
| 4514 | } |
| 4515 | if (info->port != efx_port_num(efx)) |
| 4516 | return -ENODEV; |
| 4517 | |
| 4518 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); |
| 4519 | if (rc) |
| 4520 | return rc; |
| 4521 | if (protected) |
| 4522 | return -ENODEV; /* hide it */ |
| 4523 | |
| 4524 | part->nvram_type = type; |
| 4525 | |
| 4526 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); |
| 4527 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), |
| 4528 | outbuf, sizeof(outbuf), &outlen); |
| 4529 | if (rc) |
| 4530 | return rc; |
| 4531 | if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) |
| 4532 | return -EIO; |
| 4533 | if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & |
| 4534 | (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) |
| 4535 | part->fw_subtype = MCDI_DWORD(outbuf, |
| 4536 | NVRAM_METADATA_OUT_SUBTYPE); |
| 4537 | |
| 4538 | part->common.dev_type_name = "EF10 NVRAM manager"; |
| 4539 | part->common.type_name = info->name; |
| 4540 | |
| 4541 | part->common.mtd.type = MTD_NORFLASH; |
| 4542 | part->common.mtd.flags = MTD_CAP_NORFLASH; |
| 4543 | part->common.mtd.size = size; |
| 4544 | part->common.mtd.erasesize = erase_size; |
| 4545 | |
| 4546 | return 0; |
| 4547 | } |
| 4548 | |
| 4549 | static int efx_ef10_mtd_probe(struct efx_nic *efx) |
| 4550 | { |
| 4551 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); |
| 4552 | struct efx_mcdi_mtd_partition *parts; |
| 4553 | size_t outlen, n_parts_total, i, n_parts; |
| 4554 | unsigned int type; |
| 4555 | int rc; |
| 4556 | |
| 4557 | ASSERT_RTNL(); |
| 4558 | |
| 4559 | BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); |
| 4560 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, |
| 4561 | outbuf, sizeof(outbuf), &outlen); |
| 4562 | if (rc) |
| 4563 | return rc; |
| 4564 | if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) |
| 4565 | return -EIO; |
| 4566 | |
| 4567 | n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); |
| 4568 | if (n_parts_total > |
| 4569 | MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) |
| 4570 | return -EIO; |
| 4571 | |
| 4572 | parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); |
| 4573 | if (!parts) |
| 4574 | return -ENOMEM; |
| 4575 | |
| 4576 | n_parts = 0; |
| 4577 | for (i = 0; i < n_parts_total; i++) { |
| 4578 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, |
| 4579 | i); |
| 4580 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); |
| 4581 | if (rc == 0) |
| 4582 | n_parts++; |
| 4583 | else if (rc != -ENODEV) |
| 4584 | goto fail; |
| 4585 | } |
| 4586 | |
| 4587 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); |
| 4588 | fail: |
| 4589 | if (rc) |
| 4590 | kfree(parts); |
| 4591 | return rc; |
| 4592 | } |
| 4593 | |
| 4594 | #endif /* CONFIG_SFC_MTD */ |
| 4595 | |
| 4596 | static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) |
| 4597 | { |
| 4598 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); |
| 4599 | } |
| 4600 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4601 | static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, |
| 4602 | u32 host_time) {} |
| 4603 | |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 4604 | static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, |
| 4605 | bool temp) |
| 4606 | { |
| 4607 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); |
| 4608 | int rc; |
| 4609 | |
| 4610 | if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || |
| 4611 | channel->sync_events_state == SYNC_EVENTS_VALID || |
| 4612 | (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) |
| 4613 | return 0; |
| 4614 | channel->sync_events_state = SYNC_EVENTS_REQUESTED; |
| 4615 | |
| 4616 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); |
| 4617 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); |
| 4618 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, |
| 4619 | channel->channel); |
| 4620 | |
| 4621 | rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, |
| 4622 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 4623 | |
| 4624 | if (rc != 0) |
| 4625 | channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : |
| 4626 | SYNC_EVENTS_DISABLED; |
| 4627 | |
| 4628 | return rc; |
| 4629 | } |
| 4630 | |
| 4631 | static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, |
| 4632 | bool temp) |
| 4633 | { |
| 4634 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); |
| 4635 | int rc; |
| 4636 | |
| 4637 | if (channel->sync_events_state == SYNC_EVENTS_DISABLED || |
| 4638 | (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) |
| 4639 | return 0; |
| 4640 | if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { |
| 4641 | channel->sync_events_state = SYNC_EVENTS_DISABLED; |
| 4642 | return 0; |
| 4643 | } |
| 4644 | channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : |
| 4645 | SYNC_EVENTS_DISABLED; |
| 4646 | |
| 4647 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); |
| 4648 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); |
| 4649 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, |
| 4650 | MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); |
| 4651 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, |
| 4652 | channel->channel); |
| 4653 | |
| 4654 | rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, |
| 4655 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 4656 | |
| 4657 | return rc; |
| 4658 | } |
| 4659 | |
| 4660 | static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, |
| 4661 | bool temp) |
| 4662 | { |
| 4663 | int (*set)(struct efx_channel *channel, bool temp); |
| 4664 | struct efx_channel *channel; |
| 4665 | |
| 4666 | set = en ? |
| 4667 | efx_ef10_rx_enable_timestamping : |
| 4668 | efx_ef10_rx_disable_timestamping; |
| 4669 | |
| 4670 | efx_for_each_channel(channel, efx) { |
| 4671 | int rc = set(channel, temp); |
| 4672 | if (en && rc != 0) { |
| 4673 | efx_ef10_ptp_set_ts_sync_events(efx, false, temp); |
| 4674 | return rc; |
| 4675 | } |
| 4676 | } |
| 4677 | |
| 4678 | return 0; |
| 4679 | } |
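|      | /* Editorial note: when enabling, a failure on any channel rolls the |
|      |  * whole operation back by re-invoking this function with en=false, so |
|      |  * sync events end up either enabled on every channel or disabled on |
|      |  * every channel. |
|      |  */ |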
| 4680 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4681 | static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, |
| 4682 | struct hwtstamp_config *init) |
| 4683 | { |
| 4684 | return -EOPNOTSUPP; |
| 4685 | } |
| 4686 | |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 4687 | static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, |
| 4688 | struct hwtstamp_config *init) |
| 4689 | { |
| 4690 | int rc; |
| 4691 | |
| 4692 | switch (init->rx_filter) { |
| 4693 | case HWTSTAMP_FILTER_NONE: |
| 4694 | efx_ef10_ptp_set_ts_sync_events(efx, false, false); |
| 4695 | /* if TX timestamping is still requested then leave PTP on */ |
| 4696 | return efx_ptp_change_mode(efx, |
| 4697 | init->tx_type != HWTSTAMP_TX_OFF, 0); |
| 4698 | case HWTSTAMP_FILTER_ALL: |
| 4699 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
| 4700 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
| 4701 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
| 4702 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: |
| 4703 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
| 4704 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: |
| 4705 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: |
| 4706 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: |
| 4707 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: |
| 4708 | case HWTSTAMP_FILTER_PTP_V2_EVENT: |
| 4709 | case HWTSTAMP_FILTER_PTP_V2_SYNC: |
| 4710 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
| 4711 | init->rx_filter = HWTSTAMP_FILTER_ALL; |
| 4712 | rc = efx_ptp_change_mode(efx, true, 0); |
| 4713 | if (!rc) |
| 4714 | rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); |
| 4715 | if (rc) |
| 4716 | efx_ptp_change_mode(efx, false, 0); |
| 4717 | return rc; |
| 4718 | default: |
| 4719 | return -ERANGE; |
| 4720 | } |
| 4721 | } |
| 4722 | |
Andrew Rybchenko | 100a9db | 2016-06-15 17:42:26 +0100 | [diff] [blame] | 4723 | #define EF10_OFFLOAD_FEATURES \ |
| 4724 | (NETIF_F_IP_CSUM | \ |
| 4725 | NETIF_F_IPV6_CSUM | \ |
| 4726 | NETIF_F_RXHASH | \ |
| 4727 | NETIF_F_NTUPLE) |
| 4728 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4729 | const struct efx_nic_type efx_hunt_a0_vf_nic_type = { |
Shradha Shah | 6f7f8aa | 2015-05-06 01:00:07 +0100 | [diff] [blame] | 4730 | .is_vf = true, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4731 | .mem_bar = EFX_MEM_VF_BAR, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4732 | .mem_map_size = efx_ef10_mem_map_size, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4733 | .probe = efx_ef10_probe_vf, |
| 4734 | .remove = efx_ef10_remove, |
| 4735 | .dimension_resources = efx_ef10_dimension_resources, |
| 4736 | .init = efx_ef10_init_nic, |
| 4737 | .fini = efx_port_dummy_op_void, |
Jon Cooper | 087e902 | 2015-05-20 11:11:35 +0100 | [diff] [blame] | 4738 | .map_reset_reason = efx_ef10_map_reset_reason, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4739 | .map_reset_flags = efx_ef10_map_reset_flags, |
| 4740 | .reset = efx_ef10_reset, |
| 4741 | .probe_port = efx_mcdi_port_probe, |
| 4742 | .remove_port = efx_mcdi_port_remove, |
| 4743 | .fini_dmaq = efx_ef10_fini_dmaq, |
| 4744 | .prepare_flr = efx_ef10_prepare_flr, |
| 4745 | .finish_flr = efx_port_dummy_op_void, |
| 4746 | .describe_stats = efx_ef10_describe_stats, |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 4747 | .update_stats = efx_ef10_update_stats_vf, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4748 | .start_stats = efx_port_dummy_op_void, |
| 4749 | .pull_stats = efx_port_dummy_op_void, |
| 4750 | .stop_stats = efx_port_dummy_op_void, |
| 4751 | .set_id_led = efx_mcdi_set_id_led, |
| 4752 | .push_irq_moderation = efx_ef10_push_irq_moderation, |
Shradha Shah | 862f894 | 2015-05-20 11:08:56 +0100 | [diff] [blame] | 4753 | .reconfigure_mac = efx_ef10_mac_reconfigure_vf, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4754 | .check_mac_fault = efx_mcdi_mac_check_fault, |
| 4755 | .reconfigure_port = efx_mcdi_port_reconfigure, |
| 4756 | .get_wol = efx_ef10_get_wol_vf, |
| 4757 | .set_wol = efx_ef10_set_wol_vf, |
| 4758 | .resume_wol = efx_port_dummy_op_void, |
| 4759 | .mcdi_request = efx_ef10_mcdi_request, |
| 4760 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, |
| 4761 | .mcdi_read_response = efx_ef10_mcdi_read_response, |
| 4762 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 4763 | .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4764 | .irq_enable_master = efx_port_dummy_op_void, |
| 4765 | .irq_test_generate = efx_ef10_irq_test_generate, |
| 4766 | .irq_disable_non_ev = efx_port_dummy_op_void, |
| 4767 | .irq_handle_msi = efx_ef10_msi_interrupt, |
| 4768 | .irq_handle_legacy = efx_ef10_legacy_interrupt, |
| 4769 | .tx_probe = efx_ef10_tx_probe, |
| 4770 | .tx_init = efx_ef10_tx_init, |
| 4771 | .tx_remove = efx_ef10_tx_remove, |
| 4772 | .tx_write = efx_ef10_tx_write, |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 4773 | .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4774 | .rx_probe = efx_ef10_rx_probe, |
| 4775 | .rx_init = efx_ef10_rx_init, |
| 4776 | .rx_remove = efx_ef10_rx_remove, |
| 4777 | .rx_write = efx_ef10_rx_write, |
| 4778 | .rx_defer_refill = efx_ef10_rx_defer_refill, |
| 4779 | .ev_probe = efx_ef10_ev_probe, |
| 4780 | .ev_init = efx_ef10_ev_init, |
| 4781 | .ev_fini = efx_ef10_ev_fini, |
| 4782 | .ev_remove = efx_ef10_ev_remove, |
| 4783 | .ev_process = efx_ef10_ev_process, |
| 4784 | .ev_read_ack = efx_ef10_ev_read_ack, |
| 4785 | .ev_test_generate = efx_ef10_ev_test_generate, |
| 4786 | .filter_table_probe = efx_ef10_filter_table_probe, |
| 4787 | .filter_table_restore = efx_ef10_filter_table_restore, |
| 4788 | .filter_table_remove = efx_ef10_filter_table_remove, |
| 4789 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, |
| 4790 | .filter_insert = efx_ef10_filter_insert, |
| 4791 | .filter_remove_safe = efx_ef10_filter_remove_safe, |
| 4792 | .filter_get_safe = efx_ef10_filter_get_safe, |
| 4793 | .filter_clear_rx = efx_ef10_filter_clear_rx, |
| 4794 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, |
| 4795 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, |
| 4796 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, |
| 4797 | #ifdef CONFIG_RFS_ACCEL |
| 4798 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, |
| 4799 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, |
| 4800 | #endif |
| 4801 | #ifdef CONFIG_SFC_MTD |
| 4802 | .mtd_probe = efx_port_dummy_op_int, |
| 4803 | #endif |
| 4804 | .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, |
| 4805 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, |
| 4806 | #ifdef CONFIG_SFC_SRIOV |
Shradha Shah | 7b8c7b5 | 2015-05-06 00:58:54 +0100 | [diff] [blame] | 4807 | .vswitching_probe = efx_ef10_vswitching_probe_vf, |
| 4808 | .vswitching_restore = efx_ef10_vswitching_restore_vf, |
| 4809 | .vswitching_remove = efx_ef10_vswitching_remove_vf, |
Shradha Shah | 1d051e0 | 2015-06-02 11:38:16 +0100 | [diff] [blame] | 4810 | .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4811 | #endif |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 4812 | .get_mac_address = efx_ef10_get_mac_address_vf, |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4813 | .set_mac_address = efx_ef10_set_mac_address, |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 4814 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4815 | .revision = EFX_REV_HUNT_A0, |
| 4816 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), |
| 4817 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, |
| 4818 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, |
| 4819 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, |
| 4820 | .can_rx_scatter = true, |
| 4821 | .always_rx_scatter = true, |
| 4822 | .max_interrupt_mode = EFX_INT_MODE_MSIX, |
| 4823 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, |
Andrew Rybchenko | 100a9db | 2016-06-15 17:42:26 +0100 | [diff] [blame] | 4824 | .offload_features = EF10_OFFLOAD_FEATURES, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4825 | .mcdi_max_ver = 2, |
| 4826 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, |
| 4827 | .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | |
| 4828 | 1 << HWTSTAMP_FILTER_ALL, |
| 4829 | }; |
| 4830 | |
| 4831 | const struct efx_nic_type efx_hunt_a0_nic_type = { |
Shradha Shah | 6f7f8aa | 2015-05-06 01:00:07 +0100 | [diff] [blame] | 4832 | .is_vf = false, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 4833 | .mem_bar = EFX_MEM_BAR, |
| 4834 | .mem_map_size = efx_ef10_mem_map_size, |
| 4835 | .probe = efx_ef10_probe_pf, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4836 | .remove = efx_ef10_remove, |
| 4837 | .dimension_resources = efx_ef10_dimension_resources, |
| 4838 | .init = efx_ef10_init_nic, |
| 4839 | .fini = efx_port_dummy_op_void, |
Jon Cooper | 087e902 | 2015-05-20 11:11:35 +0100 | [diff] [blame] | 4840 | .map_reset_reason = efx_ef10_map_reset_reason, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4841 | .map_reset_flags = efx_ef10_map_reset_flags, |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 4842 | .reset = efx_ef10_reset, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4843 | .probe_port = efx_mcdi_port_probe, |
| 4844 | .remove_port = efx_mcdi_port_remove, |
| 4845 | .fini_dmaq = efx_ef10_fini_dmaq, |
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 4846 | .prepare_flr = efx_ef10_prepare_flr, |
| 4847 | .finish_flr = efx_port_dummy_op_void, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4848 | .describe_stats = efx_ef10_describe_stats, |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 4849 | .update_stats = efx_ef10_update_stats_pf, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4850 | .start_stats = efx_mcdi_mac_start_stats, |
Jon Cooper | f8f3b5a | 2013-09-30 17:36:50 +0100 | [diff] [blame] | 4851 | .pull_stats = efx_mcdi_mac_pull_stats, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4852 | .stop_stats = efx_mcdi_mac_stop_stats, |
| 4853 | .set_id_led = efx_mcdi_set_id_led, |
| 4854 | .push_irq_moderation = efx_ef10_push_irq_moderation, |
| 4855 | .reconfigure_mac = efx_ef10_mac_reconfigure, |
| 4856 | .check_mac_fault = efx_mcdi_mac_check_fault, |
| 4857 | .reconfigure_port = efx_mcdi_port_reconfigure, |
| 4858 | .get_wol = efx_ef10_get_wol, |
| 4859 | .set_wol = efx_ef10_set_wol, |
| 4860 | .resume_wol = efx_port_dummy_op_void, |
Jon Cooper | 74cd60a | 2013-09-16 14:18:51 +0100 | [diff] [blame] | 4861 | .test_chip = efx_ef10_test_chip, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4862 | .test_nvram = efx_mcdi_nvram_test_all, |
| 4863 | .mcdi_request = efx_ef10_mcdi_request, |
| 4864 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, |
| 4865 | .mcdi_read_response = efx_ef10_mcdi_read_response, |
| 4866 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 4867 | .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4868 | .irq_enable_master = efx_port_dummy_op_void, |
| 4869 | .irq_test_generate = efx_ef10_irq_test_generate, |
| 4870 | .irq_disable_non_ev = efx_port_dummy_op_void, |
| 4871 | .irq_handle_msi = efx_ef10_msi_interrupt, |
| 4872 | .irq_handle_legacy = efx_ef10_legacy_interrupt, |
| 4873 | .tx_probe = efx_ef10_tx_probe, |
| 4874 | .tx_init = efx_ef10_tx_init, |
| 4875 | .tx_remove = efx_ef10_tx_remove, |
| 4876 | .tx_write = efx_ef10_tx_write, |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 4877 | .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4878 | .rx_probe = efx_ef10_rx_probe, |
| 4879 | .rx_init = efx_ef10_rx_init, |
| 4880 | .rx_remove = efx_ef10_rx_remove, |
| 4881 | .rx_write = efx_ef10_rx_write, |
| 4882 | .rx_defer_refill = efx_ef10_rx_defer_refill, |
| 4883 | .ev_probe = efx_ef10_ev_probe, |
| 4884 | .ev_init = efx_ef10_ev_init, |
| 4885 | .ev_fini = efx_ef10_ev_fini, |
| 4886 | .ev_remove = efx_ef10_ev_remove, |
| 4887 | .ev_process = efx_ef10_ev_process, |
| 4888 | .ev_read_ack = efx_ef10_ev_read_ack, |
| 4889 | .ev_test_generate = efx_ef10_ev_test_generate, |
| 4890 | .filter_table_probe = efx_ef10_filter_table_probe, |
| 4891 | .filter_table_restore = efx_ef10_filter_table_restore, |
| 4892 | .filter_table_remove = efx_ef10_filter_table_remove, |
| 4893 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, |
| 4894 | .filter_insert = efx_ef10_filter_insert, |
| 4895 | .filter_remove_safe = efx_ef10_filter_remove_safe, |
| 4896 | .filter_get_safe = efx_ef10_filter_get_safe, |
| 4897 | .filter_clear_rx = efx_ef10_filter_clear_rx, |
| 4898 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, |
| 4899 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, |
| 4900 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, |
| 4901 | #ifdef CONFIG_RFS_ACCEL |
| 4902 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, |
| 4903 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, |
| 4904 | #endif |
| 4905 | #ifdef CONFIG_SFC_MTD |
| 4906 | .mtd_probe = efx_ef10_mtd_probe, |
| 4907 | .mtd_rename = efx_mcdi_mtd_rename, |
| 4908 | .mtd_read = efx_mcdi_mtd_read, |
| 4909 | .mtd_erase = efx_mcdi_mtd_erase, |
| 4910 | .mtd_write = efx_mcdi_mtd_write, |
| 4911 | .mtd_sync = efx_mcdi_mtd_sync, |
| 4912 | #endif |
| 4913 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 4914 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, |
| 4915 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, |
Shradha Shah | 7fa8d54 | 2015-05-06 00:55:13 +0100 | [diff] [blame] | 4916 | #ifdef CONFIG_SFC_SRIOV |
Shradha Shah | 834e23d | 2015-05-06 00:55:58 +0100 | [diff] [blame] | 4917 | .sriov_configure = efx_ef10_sriov_configure, |
Shradha Shah | d98a4ff | 2014-11-05 12:16:46 +0000 | [diff] [blame] | 4918 | .sriov_init = efx_ef10_sriov_init, |
| 4919 | .sriov_fini = efx_ef10_sriov_fini, |
Shradha Shah | d98a4ff | 2014-11-05 12:16:46 +0000 | [diff] [blame] | 4920 | .sriov_wanted = efx_ef10_sriov_wanted, |
| 4921 | .sriov_reset = efx_ef10_sriov_reset, |
Shradha Shah | 7fa8d54 | 2015-05-06 00:55:13 +0100 | [diff] [blame] | 4922 | .sriov_flr = efx_ef10_sriov_flr, |
| 4923 | .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, |
| 4924 | .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, |
| 4925 | .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, |
| 4926 | .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, |
Edward Cree | 4392dc6 | 2015-05-20 11:12:13 +0100 | [diff] [blame] | 4927 | .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, |
Shradha Shah | 7b8c7b5 | 2015-05-06 00:58:54 +0100 | [diff] [blame] | 4928 | .vswitching_probe = efx_ef10_vswitching_probe_pf, |
| 4929 | .vswitching_restore = efx_ef10_vswitching_restore_pf, |
| 4930 | .vswitching_remove = efx_ef10_vswitching_remove_pf, |
Shradha Shah | 7fa8d54 | 2015-05-06 00:55:13 +0100 | [diff] [blame] | 4931 | #endif |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 4932 | .get_mac_address = efx_ef10_get_mac_address_pf, |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 4933 | .set_mac_address = efx_ef10_set_mac_address, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4934 | |
| 4935 | .revision = EFX_REV_HUNT_A0, |
| 4936 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), |
| 4937 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, |
| 4938 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 4939 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4940 | .can_rx_scatter = true, |
| 4941 | .always_rx_scatter = true, |
| 4942 | .max_interrupt_mode = EFX_INT_MODE_MSIX, |
| 4943 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, |
Andrew Rybchenko | 100a9db | 2016-06-15 17:42:26 +0100 | [diff] [blame] | 4944 | .offload_features = EF10_OFFLOAD_FEATURES, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4945 | .mcdi_max_ver = 2, |
| 4946 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 4947 | .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE) | |
| 4948 | (1 << HWTSTAMP_FILTER_ALL), |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4949 | }; |
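/* A minimal sketch (kept inside a comment; it is not part of this file) of
 * how the core driver selects between these nic_type structures: efx.c binds
 * one of them to each PCI function through the driver_data field of its PCI
 * ID table and then dispatches through efx->type at runtime.  The device IDs
 * 0x0903/0x1903 below are shown purely for illustration.
 *
 *	static const struct pci_device_id efx_pci_table[] = {
 *		{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),
 *		 .driver_data = (unsigned long)&efx_hunt_a0_nic_type},
 *		{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),
 *		 .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type},
 *		{0}
 *	};
 *	MODULE_DEVICE_TABLE(pci, efx_pci_table);
 *
 * The probe path then stores the chosen type in struct efx_nic and calls
 * through it, e.g.:
 *
 *	efx->type = (const struct efx_nic_type *)pci_id->driver_data;
 *	...
 *	rc = efx->type->probe(efx);
 */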