/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

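/* Event code used for driver-generated events; the enum below gives the
 * event subtypes carried in such events (self-test and RX refill requests
 * respectively).
 */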
#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

enum efx_ef10_default_filters {
	EFX_EF10_BCAST,
	EFX_EF10_UCDEF,
	EFX_EF10_MCDEF,
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS
};

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_ef10_filter_table {
	/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
 * used to mark and sweep MAC filters for the device address lists.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
	/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
	/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool vlan_filter;
	struct list_head vlan_list;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);

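/* Read the MC warm boot count from the BIU shared status register.  The
 * upper word must contain the 0xb007 ("boot") magic for the count in the
 * lower word to be valid; otherwise report -EIO.
 */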
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar;
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

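/* Fetch the datapath firmware capabilities via MC_CMD_GET_CAPABILITIES and
 * cache them in nic_data.  Firmware returning the larger V2 response also
 * reports FLAGS2 and the PIO buffer size; older firmware falls back to the
 * default PIO buffer size below.
 */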
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
	} else {
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
	}

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

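/* Query which firmware bug workarounds are implemented and enabled.  The
 * bug 61265 workaround is only noted if already enabled; the bug 35388
 * workaround is additionally enabled here if the firmware implements it
 * but has not yet turned it on.
 */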
static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

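/* Decode a GET_EVQ_TMR_PROPERTIES response.  Which fields are valid depends
 * on the active timer workarounds, so the timer quantum and maximum are
 * taken from the matching set of fields below.
 */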
static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

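/* Get the event queue timer properties from the MC.  If the MCDI command is
 * not available, fall back to Huntington defaults derived from the system
 * clock frequency (1536 clock cycles per timer tick, as below).
 */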
static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

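/* Look up a VID in the per-function VLAN list.  The caller must hold
 * vlan_lock, which protects vlan_list and is asserted below.
 */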
static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init.  8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with the VLAN filtering feature.  We need to keep it to
	 * receive untagged traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

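/* Main probe for an EF10 function: size the channel limits from the BAR
 * mapping, allocate the MCDI buffer, bring up MCDI, read datapath
 * capabilities, port and MAC information, and set up the initial VLAN list.
 * Most failure paths unwind in reverse order via the labels at the end.
 */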
static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	/* We can have one VI for each 8K region.  However, until we
	 * use TX option descriptors we need two TX queues per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_probe(efx, NULL);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic.  It is added automatically if the 8021q module is
	 * loaded, but we can't rely on that since the module may not
	 * be loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_detach(efx);

	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

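/* PIO (programmed I/O) buffer management.  PIO buffers are allocated from
 * the MC (MC_CMD_ALLOC_PIOBUF) and then linked to VIs and TX queues
 * (MC_CMD_LINK_PIOBUF) through the write-combining mapping, so the TX path
 * can use them as copy-buffers for small packets.
 */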
#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

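/* Illustration of the queue-to-buffer mapping computed below (example
 * values only): with tx_channel_offset 0, four TX channels and a PIO
 * buffer holding two copy-buffers of efx_piobuf_size each, channel 3 maps
 * to buffer 0 offset 0, channel 2 to buffer 0 offset efx_piobuf_size,
 * channel 1 to buffer 1 offset 0 and channel 0 to buffer 1 offset
 * efx_piobuf_size, i.e. the highest-numbered channel uses the lowest
 * buffer index.
 */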
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	_MCDI_DECLARE_BUF(inbuf,
			  max(MC_CMD_LINK_PIOBUF_IN_LEN,
			      MC_CMD_UNLINK_PIOBUF_IN_LEN));
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	memset(inbuf, 0, sizeof(inbuf));

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

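/* Query vadaptor properties (port flags, vadaptor flags, available VLAN
 * tags) for the given port.  The MCDI request is only issued when the
 * firmware advertises the VADAPTOR_QUERY capability.
 */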
int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf)) {
			rc = -EIO;
			return rc;
		}
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

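/* Allocate between min_vis and max_vis virtual interfaces from the MC.  The
 * firmware reports the base VI number and the count actually granted, which
 * may be less than max_vis; the caller is responsible for checking the
 * count.
 */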
static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis = max(EFX_TXQ_TYPES,
				   efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc == -ENOSPC)
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
		else if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first half of the next VI.  Then the WC mapping begins with
	 * the second half of this last VI.
	 */
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 1238 | uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE + |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 1239 | ER_DZ_TX_PIOBUF); |
| 1240 | if (nic_data->n_piobufs) { |
Daniel Pieczko | 52ad762 | 2014-04-01 13:10:34 +0100 | [diff] [blame] | 1241 | /* pio_write_vi_base rounds down to give the number of complete |
| 1242 | * VIs inside the UC mapping. |
| 1243 | */ |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 1244 | pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; |
| 1245 | wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + |
| 1246 | nic_data->n_piobufs) * |
| 1247 | EFX_VI_PAGE_SIZE) - |
| 1248 | uc_mem_map_size); |
| 1249 | max_vis = pio_write_vi_base + nic_data->n_piobufs; |
| 1250 | } else { |
| 1251 | pio_write_vi_base = 0; |
| 1252 | wc_mem_map_size = 0; |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 1253 | max_vis = channel_vis; |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 1254 | } |
| 1255 | |
| 1256 | /* In case the last attached driver failed to free VIs, do it now */ |
| 1257 | rc = efx_ef10_free_vis(efx); |
| 1258 | if (rc != 0) |
| 1259 | return rc; |
| 1260 | |
| 1261 | rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); |
| 1262 | if (rc != 0) |
| 1263 | return rc; |
| 1264 | |
Shradha Shah | b0fbdae | 2015-08-28 10:55:42 +0100 | [diff] [blame] | 1265 | if (nic_data->n_allocated_vis < channel_vis) { |
| 1266 | netif_info(efx, drv, efx->net_dev, |
| 1267 | "Could not allocate enough VIs to satisfy RSS" |
| 1268 | " requirements. Performance may not be optimal.\n"); |
| 1269 | /* We didn't get the VIs to populate our channels. |
| 1270 | * We could keep what we got but then we'd have more |
| 1271 | * interrupts than we need. |
| 1272 | * Instead, calculate a new max_channels and restart. |
| 1273 | */ |
| 1274 | efx->max_channels = nic_data->n_allocated_vis; |
| 1275 | efx->max_tx_channels = |
| 1276 | nic_data->n_allocated_vis / EFX_TXQ_TYPES; |
| 1277 | |
| 1278 | efx_ef10_free_vis(efx); |
| 1279 | return -EAGAIN; |
| 1280 | } |
| 1281 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 1282 | /* If we didn't get enough VIs to map all the PIO buffers, free the |
| 1283 | * PIO buffers |
| 1284 | */ |
| 1285 | if (nic_data->n_piobufs && |
| 1286 | nic_data->n_allocated_vis < |
| 1287 | pio_write_vi_base + nic_data->n_piobufs) { |
| 1288 | netif_dbg(efx, probe, efx->net_dev, |
| 1289 | "%u VIs are not sufficient to map %u PIO buffers\n", |
| 1290 | nic_data->n_allocated_vis, nic_data->n_piobufs); |
| 1291 | efx_ef10_free_piobufs(efx); |
| 1292 | } |
| 1293 | |
| 1294 | /* Shrink the original UC mapping of the memory BAR */ |
| 1295 | membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); |
| 1296 | if (!membase) { |
| 1297 | netif_err(efx, probe, efx->net_dev, |
| 1298 | "could not shrink memory BAR to %x\n", |
| 1299 | uc_mem_map_size); |
| 1300 | return -ENOMEM; |
| 1301 | } |
| 1302 | iounmap(efx->membase); |
| 1303 | efx->membase = membase; |
| 1304 | |
| 1305 | /* Set up the WC mapping if needed */ |
| 1306 | if (wc_mem_map_size) { |
| 1307 | nic_data->wc_membase = ioremap_wc(efx->membase_phys + |
| 1308 | uc_mem_map_size, |
| 1309 | wc_mem_map_size); |
| 1310 | if (!nic_data->wc_membase) { |
| 1311 | netif_err(efx, probe, efx->net_dev, |
| 1312 | "could not allocate WC mapping of size %x\n", |
| 1313 | wc_mem_map_size); |
| 1314 | return -ENOMEM; |
| 1315 | } |
| 1316 | nic_data->pio_write_vi_base = pio_write_vi_base; |
| 1317 | nic_data->pio_write_base = |
| 1318 | nic_data->wc_membase + |
| 1319 | (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF - |
| 1320 | uc_mem_map_size); |
| 1321 | |
| 1322 | rc = efx_ef10_link_piobufs(efx); |
| 1323 | if (rc) |
| 1324 | efx_ef10_free_piobufs(efx); |
| 1325 | } |
| 1326 | |
| 1327 | netif_dbg(efx, probe, efx->net_dev, |
| 1328 | "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", |
| 1329 | &efx->membase_phys, efx->membase, uc_mem_map_size, |
| 1330 | nic_data->wc_membase, wc_mem_map_size); |
| 1331 | |
| 1332 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1333 | } |
| 1334 | |
| 1335 | static int efx_ef10_init_nic(struct efx_nic *efx) |
| 1336 | { |
| 1337 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1338 | int rc; |
| 1339 | |
Ben Hutchings | a915ccc | 2013-09-05 22:51:55 +0100 | [diff] [blame] | 1340 | if (nic_data->must_check_datapath_caps) { |
| 1341 | rc = efx_ef10_init_datapath_caps(efx); |
| 1342 | if (rc) |
| 1343 | return rc; |
| 1344 | nic_data->must_check_datapath_caps = false; |
| 1345 | } |
| 1346 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1347 | if (nic_data->must_realloc_vis) { |
| 1348 | /* We cannot let the number of VIs change now */ |
| 1349 | rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, |
| 1350 | nic_data->n_allocated_vis); |
| 1351 | if (rc) |
| 1352 | return rc; |
| 1353 | nic_data->must_realloc_vis = false; |
| 1354 | } |
| 1355 | |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 1356 | if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { |
| 1357 | rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); |
| 1358 | if (rc == 0) { |
| 1359 | rc = efx_ef10_link_piobufs(efx); |
| 1360 | if (rc) |
| 1361 | efx_ef10_free_piobufs(efx); |
| 1362 | } |
| 1363 | |
Tomáš Pilař | 6eacfb5 | 2017-01-25 13:48:17 +0000 | [diff] [blame] | 1364 | /* Log an error on failure, but this is non-fatal. |
| 1365 | * Permission errors are less important - we've presumably |
| 1366 | * had the PIO buffer licence removed. |
| 1367 | */ |
| 1368 | if (rc == -EPERM) |
| 1369 | netif_dbg(efx, drv, efx->net_dev, |
| 1370 | "not permitted to restore PIO buffers\n"); |
| 1371 | else if (rc) |
Ben Hutchings | 183233b | 2013-06-28 21:47:12 +0100 | [diff] [blame] | 1372 | netif_err(efx, drv, efx->net_dev, |
| 1373 | "failed to restore PIO buffers (%d)\n", rc); |
| 1374 | nic_data->must_restore_piobufs = false; |
| 1375 | } |
| 1376 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1377 | /* don't fail init if RSS setup doesn't work */ |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 1378 | rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); |
Edward Cree | 4fdda95 | 2017-01-04 15:10:56 +0000 | [diff] [blame] | 1379 | efx->rss_active = (rc == 0); |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 1380 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1381 | return 0; |
| 1382 | } |
| 1383 | |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1384 | static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) |
| 1385 | { |
| 1386 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Daniel Pieczko | 774ad03 | 2015-07-31 11:15:22 +0100 | [diff] [blame] | 1387 | #ifdef CONFIG_SFC_SRIOV |
| 1388 | unsigned int i; |
| 1389 | #endif |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1390 | |
| 1391 | /* All our allocations have been reset */ |
| 1392 | nic_data->must_realloc_vis = true; |
| 1393 | nic_data->must_restore_filters = true; |
| 1394 | nic_data->must_restore_piobufs = true; |
Edward Cree | c0795bf | 2016-05-24 18:53:36 +0100 | [diff] [blame] | 1395 | efx_ef10_forget_old_piobufs(efx); |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1396 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
Daniel Pieczko | 774ad03 | 2015-07-31 11:15:22 +0100 | [diff] [blame] | 1397 | |
| 1398 | /* Driver-created vswitches and vports must be re-created */ |
| 1399 | nic_data->must_probe_vswitching = true; |
| 1400 | nic_data->vport_id = EVB_PORT_ID_ASSIGNED; |
| 1401 | #ifdef CONFIG_SFC_SRIOV |
| 1402 | if (nic_data->vf) |
| 1403 | for (i = 0; i < efx->vf_count; i++) |
| 1404 | nic_data->vf[i].vport_id = 0; |
| 1405 | #endif |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1406 | } |
| 1407 | |
Jon Cooper | 087e902 | 2015-05-20 11:11:35 +0100 | [diff] [blame] | 1408 | static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) |
| 1409 | { |
| 1410 | if (reason == RESET_TYPE_MC_FAILURE) |
| 1411 | return RESET_TYPE_DATAPATH; |
| 1412 | |
| 1413 | return efx_mcdi_map_reset_reason(reason); |
| 1414 | } |
| 1415 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1416 | static int efx_ef10_map_reset_flags(u32 *flags) |
| 1417 | { |
| 1418 | enum { |
| 1419 | EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << |
| 1420 | ETH_RESET_SHARED_SHIFT), |
| 1421 | EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | |
| 1422 | ETH_RESET_OFFLOAD | ETH_RESET_MAC | |
| 1423 | ETH_RESET_PHY | ETH_RESET_MGMT) << |
| 1424 | ETH_RESET_SHARED_SHIFT) |
| 1425 | }; |
| 1426 | |
| 1427 | /* We assume for now that our PCI function is permitted to |
| 1428 | * reset everything. |
| 1429 | */ |
| 1430 | |
| 1431 | if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { |
| 1432 | *flags &= ~EF10_RESET_MC; |
| 1433 | return RESET_TYPE_WORLD; |
| 1434 | } |
| 1435 | |
| 1436 | if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { |
| 1437 | *flags &= ~EF10_RESET_PORT; |
| 1438 | return RESET_TYPE_ALL; |
| 1439 | } |
| 1440 | |
| 1441 | /* no invisible reset implemented */ |
| 1442 | |
| 1443 | return -EINVAL; |
| 1444 | } |
| 1445 | |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1446 | static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) |
| 1447 | { |
| 1448 | int rc = efx_mcdi_reset(efx, reset_type); |
| 1449 | |
Daniel Pieczko | 2732482 | 2015-07-31 11:14:54 +0100 | [diff] [blame] | 1450 | /* Unprivileged functions return -EPERM, but need to return success |
| 1451 | * here so that the datapath is brought back up. |
| 1452 | */ |
| 1453 | if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) |
| 1454 | rc = 0; |
| 1455 | |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1456 | /* If it was a port reset, trigger reallocation of MC resources. |
| 1457 | * Note that on an MC reset nothing needs to be done now because we'll |
| 1458 | * detect the MC reset later and handle it then. |
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 1459 | * For an FLR, we never get an MC reset event, but the MC has reset all |
| 1460 | * resources assigned to us, so we have to trigger reallocation now. |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1461 | */ |
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 1462 | if ((reset_type == RESET_TYPE_ALL || |
| 1463 | reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 1464 | efx_ef10_reset_mc_allocations(efx); |
| 1465 | return rc; |
| 1466 | } |
| 1467 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1468 | #define EF10_DMA_STAT(ext_name, mcdi_name) \ |
| 1469 | [EF10_STAT_ ## ext_name] = \ |
| 1470 | { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } |
| 1471 | #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ |
| 1472 | [EF10_STAT_ ## int_name] = \ |
| 1473 | { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } |
| 1474 | #define EF10_OTHER_STAT(ext_name) \ |
| 1475 | [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1476 | #define GENERIC_SW_STAT(ext_name) \ |
| 1477 | [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1478 | |
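| | /* Each descriptor below gives the ethtool name (NULL for stats that are |
| | * DMA'd by the MC but not exposed), the width in bits of the DMA'd field |
| | * (0 for driver-computed stats) and its byte offset in the MAC stats |
| | * DMA buffer. |
| | */ |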
| 1479 | static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1480 | EF10_DMA_STAT(port_tx_bytes, TX_BYTES), |
| 1481 | EF10_DMA_STAT(port_tx_packets, TX_PKTS), |
| 1482 | EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), |
| 1483 | EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), |
| 1484 | EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), |
| 1485 | EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), |
| 1486 | EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), |
| 1487 | EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), |
| 1488 | EF10_DMA_STAT(port_tx_64, TX_64_PKTS), |
| 1489 | EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), |
| 1490 | EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), |
| 1491 | EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), |
| 1492 | EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), |
| 1493 | EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), |
| 1494 | EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), |
| 1495 | EF10_DMA_STAT(port_rx_bytes, RX_BYTES), |
| 1496 | EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), |
| 1497 | EF10_OTHER_STAT(port_rx_good_bytes), |
| 1498 | EF10_OTHER_STAT(port_rx_bad_bytes), |
| 1499 | EF10_DMA_STAT(port_rx_packets, RX_PKTS), |
| 1500 | EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), |
| 1501 | EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), |
| 1502 | EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), |
| 1503 | EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), |
| 1504 | EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), |
| 1505 | EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), |
| 1506 | EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), |
| 1507 | EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), |
| 1508 | EF10_DMA_STAT(port_rx_64, RX_64_PKTS), |
| 1509 | EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), |
| 1510 | EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), |
| 1511 | EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), |
| 1512 | EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), |
| 1513 | EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), |
| 1514 | EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), |
| 1515 | EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), |
| 1516 | EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), |
| 1517 | EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), |
| 1518 | EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), |
| 1519 | EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), |
| 1520 | EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1521 | GENERIC_SW_STAT(rx_nodesc_trunc), |
| 1522 | GENERIC_SW_STAT(rx_noskb_drops), |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1523 | EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), |
| 1524 | EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), |
| 1525 | EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), |
| 1526 | EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), |
| 1527 | EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), |
| 1528 | EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), |
| 1529 | EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), |
| 1530 | EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), |
| 1531 | EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), |
| 1532 | EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), |
| 1533 | EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), |
| 1534 | EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1535 | EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), |
| 1536 | EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), |
| 1537 | EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), |
| 1538 | EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), |
| 1539 | EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), |
| 1540 | EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), |
| 1541 | EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), |
| 1542 | EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), |
| 1543 | EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), |
| 1544 | EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), |
| 1545 | EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), |
| 1546 | EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), |
| 1547 | EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), |
| 1548 | EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), |
| 1549 | EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), |
| 1550 | EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), |
| 1551 | EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), |
| 1552 | EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1553 | }; |
| 1554 | |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1555 | #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ |
| 1556 | (1ULL << EF10_STAT_port_tx_packets) | \ |
| 1557 | (1ULL << EF10_STAT_port_tx_pause) | \ |
| 1558 | (1ULL << EF10_STAT_port_tx_unicast) | \ |
| 1559 | (1ULL << EF10_STAT_port_tx_multicast) | \ |
| 1560 | (1ULL << EF10_STAT_port_tx_broadcast) | \ |
| 1561 | (1ULL << EF10_STAT_port_rx_bytes) | \ |
| 1562 | (1ULL << \ |
| 1563 | EF10_STAT_port_rx_bytes_minus_good_bytes) | \ |
| 1564 | (1ULL << EF10_STAT_port_rx_good_bytes) | \ |
| 1565 | (1ULL << EF10_STAT_port_rx_bad_bytes) | \ |
| 1566 | (1ULL << EF10_STAT_port_rx_packets) | \ |
| 1567 | (1ULL << EF10_STAT_port_rx_good) | \ |
| 1568 | (1ULL << EF10_STAT_port_rx_bad) | \ |
| 1569 | (1ULL << EF10_STAT_port_rx_pause) | \ |
| 1570 | (1ULL << EF10_STAT_port_rx_control) | \ |
| 1571 | (1ULL << EF10_STAT_port_rx_unicast) | \ |
| 1572 | (1ULL << EF10_STAT_port_rx_multicast) | \ |
| 1573 | (1ULL << EF10_STAT_port_rx_broadcast) | \ |
| 1574 | (1ULL << EF10_STAT_port_rx_lt64) | \ |
| 1575 | (1ULL << EF10_STAT_port_rx_64) | \ |
| 1576 | (1ULL << EF10_STAT_port_rx_65_to_127) | \ |
| 1577 | (1ULL << EF10_STAT_port_rx_128_to_255) | \ |
| 1578 | (1ULL << EF10_STAT_port_rx_256_to_511) | \ |
| 1579 | (1ULL << EF10_STAT_port_rx_512_to_1023) |\ |
| 1580 | (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ |
| 1581 | (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ |
| 1582 | (1ULL << EF10_STAT_port_rx_gtjumbo) | \ |
| 1583 | (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ |
| 1584 | (1ULL << EF10_STAT_port_rx_overflow) | \ |
| 1585 | (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1586 | (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ |
| 1587 | (1ULL << GENERIC_STAT_rx_noskb_drops)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1588 | |
Edward Cree | 69b365c | 2016-08-26 15:12:41 +0100 | [diff] [blame] | 1589 | /* On 7000 series NICs, these statistics are only provided by the 10G MAC. |
| 1590 | * For a 10G/40G switchable port we do not expose these because they might |
| 1591 | * not include all the packets they should. |
| 1592 | * On 8000 series NICs these statistics are always provided. |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1593 | */ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1594 | #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ |
| 1595 | (1ULL << EF10_STAT_port_tx_lt64) | \ |
| 1596 | (1ULL << EF10_STAT_port_tx_64) | \ |
| 1597 | (1ULL << EF10_STAT_port_tx_65_to_127) |\ |
| 1598 | (1ULL << EF10_STAT_port_tx_128_to_255) |\ |
| 1599 | (1ULL << EF10_STAT_port_tx_256_to_511) |\ |
| 1600 | (1ULL << EF10_STAT_port_tx_512_to_1023) |\ |
| 1601 | (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ |
| 1602 | (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1603 | |
| 1604 | /* These statistics are only provided by the 40G MAC. For a 10G/40G |
| 1605 | * switchable port we do expose these because the errors will otherwise |
| 1606 | * be silent. |
| 1607 | */ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1608 | #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ |
| 1609 | (1ULL << EF10_STAT_port_rx_length_error)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1610 | |
Edward Cree | 568d7a0 | 2013-09-25 17:32:09 +0100 | [diff] [blame] | 1611 | /* These statistics are only provided if the firmware supports the |
| 1612 | * capability PM_AND_RXDP_COUNTERS. |
| 1613 | */ |
| 1614 | #define HUNT_PM_AND_RXDP_STAT_MASK ( \ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1615 | (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ |
| 1616 | (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ |
| 1617 | (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ |
| 1618 | (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ |
| 1619 | (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ |
| 1620 | (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ |
| 1621 | (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ |
| 1622 | (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ |
| 1623 | (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ |
| 1624 | (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ |
| 1625 | (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ |
| 1626 | (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1627 | |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1628 | static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1629 | { |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1630 | u64 raw_mask = HUNT_COMMON_STAT_MASK; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1631 | u32 port_caps = efx_mcdi_phy_get_caps(efx); |
Edward Cree | 568d7a0 | 2013-09-25 17:32:09 +0100 | [diff] [blame] | 1632 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1633 | |
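| | /* Functions without link control (e.g. VFs) are not given the port MAC |
| | * statistics, so expose none of the port stats for them. |
| | */ |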
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1634 | if (!(efx->mcdi->fn_flags & |
| 1635 | 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) |
| 1636 | return 0; |
| 1637 | |
Edward Cree | 69b365c | 2016-08-26 15:12:41 +0100 | [diff] [blame] | 1638 | if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1639 | raw_mask |= HUNT_40G_EXTRA_STAT_MASK; |
Edward Cree | 69b365c | 2016-08-26 15:12:41 +0100 | [diff] [blame] | 1640 | /* 8000 series have everything even at 40G */ |
| 1641 | if (nic_data->datapath_caps2 & |
| 1642 | (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) |
| 1643 | raw_mask |= HUNT_10G_ONLY_STAT_MASK; |
| 1644 | } else { |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1645 | raw_mask |= HUNT_10G_ONLY_STAT_MASK; |
Edward Cree | 69b365c | 2016-08-26 15:12:41 +0100 | [diff] [blame] | 1646 | } |
Edward Cree | 568d7a0 | 2013-09-25 17:32:09 +0100 | [diff] [blame] | 1647 | |
| 1648 | if (nic_data->datapath_caps & |
| 1649 | (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) |
| 1650 | raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; |
| 1651 | |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1652 | return raw_mask; |
| 1653 | } |
| 1654 | |
| 1655 | static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) |
| 1656 | { |
Daniel Pieczko | d94619c | 2015-06-02 11:40:05 +0100 | [diff] [blame] | 1657 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1658 | u64 raw_mask[2]; |
| 1659 | |
| 1660 | raw_mask[0] = efx_ef10_raw_stat_mask(efx); |
| 1661 | |
Daniel Pieczko | d94619c | 2015-06-02 11:40:05 +0100 | [diff] [blame] | 1662 | /* Only show vadaptor stats when EVB capability is present */ |
| 1663 | if (nic_data->datapath_caps & |
| 1664 | (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { |
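| | /* More than 64 statistics are defined, so the mask spans two 64-bit |
| | * words: every bit from EF10_STAT_rx_unicast upwards in word 0, plus |
| | * the remaining vadaptor stats in word 1. |
| | */ |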
| 1665 | raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); |
| 1666 | raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; |
| 1667 | } else { |
| 1668 | raw_mask[1] = 0; |
| 1669 | } |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1670 | |
| 1671 | #if BITS_PER_LONG == 64 |
Andrew Rybchenko | e70c70c3 | 2016-08-26 11:19:34 +0100 | [diff] [blame] | 1672 | BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1673 | mask[0] = raw_mask[0]; |
| 1674 | mask[1] = raw_mask[1]; |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1675 | #else |
Andrew Rybchenko | e70c70c3 | 2016-08-26 11:19:34 +0100 | [diff] [blame] | 1676 | BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); |
Daniel Pieczko | 3c36a2a | 2015-06-02 11:39:06 +0100 | [diff] [blame] | 1677 | mask[0] = raw_mask[0] & 0xffffffff; |
| 1678 | mask[1] = raw_mask[0] >> 32; |
| 1679 | mask[2] = raw_mask[1] & 0xffffffff; |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1680 | #endif |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1681 | } |
| 1682 | |
| 1683 | static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) |
| 1684 | { |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1685 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| 1686 | |
| 1687 | efx_ef10_get_stat_mask(efx, mask); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1688 | return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1689 | mask, names); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1690 | } |
| 1691 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1692 | static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, |
| 1693 | struct rtnl_link_stats64 *core_stats) |
| 1694 | { |
| 1695 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| 1696 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1697 | u64 *stats = nic_data->stats; |
| 1698 | size_t stats_count = 0, index; |
| 1699 | |
| 1700 | efx_ef10_get_stat_mask(efx, mask); |
| 1701 | |
| 1702 | if (full_stats) { |
| 1703 | for_each_set_bit(index, mask, EF10_STAT_COUNT) { |
| 1704 | if (efx_ef10_stat_desc[index].name) { |
| 1705 | *full_stats++ = stats[index]; |
| 1706 | ++stats_count; |
| 1707 | } |
| 1708 | } |
| 1709 | } |
| 1710 | |
Bert Kenward | fbe4307 | 2015-08-26 16:39:03 +0100 | [diff] [blame] | 1711 | if (!core_stats) |
| 1712 | return stats_count; |
| 1713 | |
| 1714 | if (nic_data->datapath_caps & |
| 1715 | 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { |
| 1716 | /* Use vadaptor stats. */ |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1717 | core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + |
| 1718 | stats[EF10_STAT_rx_multicast] + |
| 1719 | stats[EF10_STAT_rx_broadcast]; |
| 1720 | core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + |
| 1721 | stats[EF10_STAT_tx_multicast] + |
| 1722 | stats[EF10_STAT_tx_broadcast]; |
| 1723 | core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + |
| 1724 | stats[EF10_STAT_rx_multicast_bytes] + |
| 1725 | stats[EF10_STAT_rx_broadcast_bytes]; |
| 1726 | core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + |
| 1727 | stats[EF10_STAT_tx_multicast_bytes] + |
| 1728 | stats[EF10_STAT_tx_broadcast_bytes]; |
| 1729 | core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1730 | stats[GENERIC_STAT_rx_noskb_drops]; |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1731 | core_stats->multicast = stats[EF10_STAT_rx_multicast]; |
| 1732 | core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; |
| 1733 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; |
| 1734 | core_stats->rx_errors = core_stats->rx_crc_errors; |
| 1735 | core_stats->tx_errors = stats[EF10_STAT_tx_bad]; |
Bert Kenward | fbe4307 | 2015-08-26 16:39:03 +0100 | [diff] [blame] | 1736 | } else { |
| 1737 | /* Use port stats. */ |
| 1738 | core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; |
| 1739 | core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; |
| 1740 | core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; |
| 1741 | core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; |
| 1742 | core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + |
| 1743 | stats[GENERIC_STAT_rx_nodesc_trunc] + |
| 1744 | stats[GENERIC_STAT_rx_noskb_drops]; |
| 1745 | core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; |
| 1746 | core_stats->rx_length_errors = |
| 1747 | stats[EF10_STAT_port_rx_gtjumbo] + |
| 1748 | stats[EF10_STAT_port_rx_length_error]; |
| 1749 | core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; |
| 1750 | core_stats->rx_frame_errors = |
| 1751 | stats[EF10_STAT_port_rx_align_error]; |
| 1752 | core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; |
| 1753 | core_stats->rx_errors = (core_stats->rx_length_errors + |
| 1754 | core_stats->rx_crc_errors + |
| 1755 | core_stats->rx_frame_errors); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1756 | } |
| 1757 | |
| 1758 | return stats_count; |
| 1759 | } |
| 1760 | |
| 1761 | static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1762 | { |
| 1763 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1764 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1765 | __le64 generation_start, generation_end; |
| 1766 | u64 *stats = nic_data->stats; |
| 1767 | __le64 *dma_stats; |
| 1768 | |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1769 | efx_ef10_get_stat_mask(efx, mask); |
| 1770 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1771 | dma_stats = efx->stats_buffer.addr; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1772 | |
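| | /* The generation counters bracket the DMA'd statistics: if the value |
| | * read before copying the stats (END) differs from the one read after |
| | * (START), the MC was updating the buffer under us and we return |
| | * -EAGAIN so the caller retries. |
| | */ |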
| 1773 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
| 1774 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) |
| 1775 | return 0; |
| 1776 | rmb(); |
Edward Cree | 4bae913 | 2013-09-27 18:52:49 +0100 | [diff] [blame] | 1777 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1778 | stats, efx->stats_buffer.addr, false); |
Jon Cooper | d546a89 | 2013-09-27 18:26:30 +0100 | [diff] [blame] | 1779 | rmb(); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1780 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
| 1781 | if (generation_end != generation_start) |
| 1782 | return -EAGAIN; |
| 1783 | |
| 1784 | /* Update derived statistics */ |
Daniel Pieczko | e80ca013 | 2015-06-02 11:38:34 +0100 | [diff] [blame] | 1785 | efx_nic_fix_nodesc_drop_stat(efx, |
| 1786 | &stats[EF10_STAT_port_rx_nodesc_drops]); |
| 1787 | stats[EF10_STAT_port_rx_good_bytes] = |
| 1788 | stats[EF10_STAT_port_rx_bytes] - |
| 1789 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; |
| 1790 | efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], |
| 1791 | stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); |
Edward Cree | e4d112e | 2014-07-15 11:58:12 +0100 | [diff] [blame] | 1792 | efx_update_sw_stats(efx, stats); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1793 | return 0; |
| 1794 | } |
| 1795 | |
| 1796 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1797 | static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, |
| 1798 | struct rtnl_link_stats64 *core_stats) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1799 | { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1800 | int retry; |
| 1801 | |
| 1802 | /* If we're unlucky enough to read statistics during the DMA, wait |
| 1803 | * up to 10ms for it to finish (typically takes <500us) |
| 1804 | */ |
| 1805 | for (retry = 0; retry < 100; ++retry) { |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1806 | if (efx_ef10_try_update_nic_stats_pf(efx) == 0) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1807 | break; |
| 1808 | udelay(100); |
| 1809 | } |
| 1810 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1811 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
| 1812 | } |
| 1813 | |
| 1814 | static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) |
| 1815 | { |
| 1816 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); |
| 1817 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1818 | DECLARE_BITMAP(mask, EF10_STAT_COUNT); |
| 1819 | __le64 generation_start, generation_end; |
| 1820 | u64 *stats = nic_data->stats; |
| 1821 | u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); |
| 1822 | struct efx_buffer stats_buf; |
| 1823 | __le64 *dma_stats; |
| 1824 | int rc; |
| 1825 | |
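| | /* The caller holds efx->stats_lock, but the MCDI request below may |
| | * sleep, so drop the lock here and re-take it before returning. |
| | */ |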
Daniel Pieczko | f00bf23 | 2015-06-02 11:40:18 +0100 | [diff] [blame] | 1826 | spin_unlock_bh(&efx->stats_lock); |
| 1827 | |
| 1828 | if (in_interrupt()) { |
| 1829 | /* If in atomic context, cannot update stats. Just update the |
| 1830 | * software stats and return so the caller can continue. |
| 1831 | */ |
| 1832 | spin_lock_bh(&efx->stats_lock); |
| 1833 | efx_update_sw_stats(efx, stats); |
| 1834 | return 0; |
| 1835 | } |
| 1836 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1837 | efx_ef10_get_stat_mask(efx, mask); |
| 1838 | |
| 1839 | rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); |
Daniel Pieczko | f00bf23 | 2015-06-02 11:40:18 +0100 | [diff] [blame] | 1840 | if (rc) { |
| 1841 | spin_lock_bh(&efx->stats_lock); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1842 | return rc; |
Daniel Pieczko | f00bf23 | 2015-06-02 11:40:18 +0100 | [diff] [blame] | 1843 | } |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1844 | |
| 1845 | dma_stats = stats_buf.addr; |
| 1846 | dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; |
| 1847 | |
| 1848 | MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); |
| 1849 | MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1850 | MAC_STATS_IN_DMA, 1); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1851 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); |
| 1852 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); |
| 1853 | |
Daniel Pieczko | 6dd4859 | 2015-06-02 11:39:49 +0100 | [diff] [blame] | 1854 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), |
| 1855 | NULL, 0, NULL); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1856 | spin_lock_bh(&efx->stats_lock); |
Daniel Pieczko | 6dd4859 | 2015-06-02 11:39:49 +0100 | [diff] [blame] | 1857 | if (rc) { |
| 1858 | /* Expect ENOENT if DMA queues have not been set up */ |
| 1859 | if (rc != -ENOENT || atomic_read(&efx->active_queues)) |
| 1860 | efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, |
| 1861 | sizeof(inbuf), NULL, 0, rc); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1862 | goto out; |
Daniel Pieczko | 6dd4859 | 2015-06-02 11:39:49 +0100 | [diff] [blame] | 1863 | } |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1864 | |
| 1865 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1866 | if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { |
| 1867 | WARN_ON_ONCE(1); |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1868 | goto out; |
Daniel Pieczko | 0fc95fc | 2015-06-02 11:39:33 +0100 | [diff] [blame] | 1869 | } |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1870 | rmb(); |
| 1871 | efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, |
| 1872 | stats, stats_buf.addr, false); |
| 1873 | rmb(); |
| 1874 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
| 1875 | if (generation_end != generation_start) { |
| 1876 | rc = -EAGAIN; |
| 1877 | goto out; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1878 | } |
| 1879 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1880 | efx_update_sw_stats(efx, stats); |
| 1881 | out: |
| 1882 | efx_nic_free_buffer(efx, &stats_buf); |
| 1883 | return rc; |
| 1884 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1885 | |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 1886 | static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, |
| 1887 | struct rtnl_link_stats64 *core_stats) |
| 1888 | { |
| 1889 | if (efx_ef10_try_update_nic_stats_vf(efx)) |
| 1890 | return 0; |
| 1891 | |
| 1892 | return efx_ef10_update_stats_common(efx, full_stats, core_stats); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1893 | } |
| 1894 | |
| 1895 | static void efx_ef10_push_irq_moderation(struct efx_channel *channel) |
| 1896 | { |
| 1897 | struct efx_nic *efx = channel->efx; |
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1898 | unsigned int mode, usecs; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1899 | efx_dword_t timer_cmd; |
| 1900 | |
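| | /* Timer mode values follow the EF10 EVQ timer encoding: 0 appears to |
| | * disable the timer and 3 to select interrupt-holdoff moderation (see |
| | * the ESE_DZ_TC_TIMER_MODE_* definitions in ef10_regs.h). |
| | */ |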
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1901 | if (channel->irq_moderation_us) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1902 | mode = 3; |
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1903 | usecs = channel->irq_moderation_us; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1904 | } else { |
| 1905 | mode = 0; |
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1906 | usecs = 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1907 | } |
| 1908 | |
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1909 | if (EFX_EF10_WORKAROUND_61265(efx)) { |
| 1910 | MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); |
| 1911 | unsigned int ns = usecs * 1000; |
| 1912 | |
| 1913 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, |
| 1914 | channel->channel); |
| 1915 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); |
| 1916 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); |
| 1917 | MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); |
| 1918 | |
| 1919 | efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, |
| 1920 | inbuf, sizeof(inbuf), 0, NULL, 0); |
| 1921 | } else if (EFX_EF10_WORKAROUND_35388(efx)) { |
| 1922 | unsigned int ticks = efx_usecs_to_ticks(efx, usecs); |
| 1923 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1924 | EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, |
| 1925 | EFE_DD_EVQ_IND_TIMER_FLAGS, |
| 1926 | ERF_DD_EVQ_IND_TIMER_MODE, mode, |
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1927 | ERF_DD_EVQ_IND_TIMER_VAL, ticks); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1928 | efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, |
| 1929 | channel->channel); |
| 1930 | } else { |
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1931 | unsigned int ticks = efx_usecs_to_ticks(efx, usecs); |
| 1932 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1933 | EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, |
Bert Kenward | 539de7c | 2016-08-11 13:02:09 +0100 | [diff] [blame] | 1934 | ERF_DZ_TC_TIMER_VAL, ticks); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1935 | efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, |
| 1936 | channel->channel); |
| 1937 | } |
| 1938 | } |
| 1939 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 1940 | static void efx_ef10_get_wol_vf(struct efx_nic *efx, |
| 1941 | struct ethtool_wolinfo *wol) {} |
| 1942 | |
| 1943 | static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) |
| 1944 | { |
| 1945 | return -EOPNOTSUPP; |
| 1946 | } |
| 1947 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 1948 | static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) |
| 1949 | { |
| 1950 | wol->supported = 0; |
| 1951 | wol->wolopts = 0; |
| 1952 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
| 1953 | } |
| 1954 | |
| 1955 | static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) |
| 1956 | { |
| 1957 | if (type != 0) |
| 1958 | return -EINVAL; |
| 1959 | return 0; |
| 1960 | } |
| 1961 | |
| 1962 | static void efx_ef10_mcdi_request(struct efx_nic *efx, |
| 1963 | const efx_dword_t *hdr, size_t hdr_len, |
| 1964 | const efx_dword_t *sdu, size_t sdu_len) |
| 1965 | { |
| 1966 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1967 | u8 *pdu = nic_data->mcdi_buf.addr; |
| 1968 | |
| 1969 | memcpy(pdu, hdr, hdr_len); |
| 1970 | memcpy(pdu + hdr_len, sdu, sdu_len); |
| 1971 | wmb(); |
| 1972 | |
| 1973 | /* The hardware provides 'low' and 'high' (doorbell) registers |
| 1974 | * for passing the 64-bit address of an MCDI request to |
| 1975 | * firmware. However the dwords are swapped by firmware. The |
| 1976 | * least significant bits of the doorbell are then 0 for all |
| 1977 | * MCDI requests due to alignment. |
| 1978 | */ |
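| | /* Note the swap below: the high dword of the DMA address is written to |
| | * the "low word" doorbell register and the low dword to the "high word" |
| | * register, matching the firmware behaviour described above. |
| | */ |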
| 1979 | _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), |
| 1980 | ER_DZ_MC_DB_LWRD); |
| 1981 | _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), |
| 1982 | ER_DZ_MC_DB_HWRD); |
| 1983 | } |
| 1984 | |
| 1985 | static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) |
| 1986 | { |
| 1987 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1988 | const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; |
| 1989 | |
| 1990 | rmb(); |
| 1991 | return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); |
| 1992 | } |
| 1993 | |
| 1994 | static void |
| 1995 | efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, |
| 1996 | size_t offset, size_t outlen) |
| 1997 | { |
| 1998 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 1999 | const u8 *pdu = nic_data->mcdi_buf.addr; |
| 2000 | |
| 2001 | memcpy(outbuf, pdu + offset, outlen); |
| 2002 | } |
| 2003 | |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 2004 | static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) |
| 2005 | { |
| 2006 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2007 | |
| 2008 | /* All our allocations have been reset */ |
| 2009 | efx_ef10_reset_mc_allocations(efx); |
| 2010 | |
| 2011 | /* The datapath firmware might have been changed */ |
| 2012 | nic_data->must_check_datapath_caps = true; |
| 2013 | |
| 2014 | /* MAC statistics have been cleared on the NIC; clear the local |
| 2015 | * statistic that we update with efx_update_diff_stat(). |
| 2016 | */ |
| 2017 | nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; |
| 2018 | } |
| 2019 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2020 | static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) |
| 2021 | { |
| 2022 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2023 | int rc; |
| 2024 | |
| 2025 | rc = efx_ef10_get_warm_boot_count(efx); |
| 2026 | if (rc < 0) { |
| 2027 | /* The firmware is presumably in the process of |
| 2028 | * rebooting. However, we are supposed to report each |
| 2029 | * reboot just once, so we must only do that once we |
| 2030 | * can read and store the updated warm boot count. |
| 2031 | */ |
| 2032 | return 0; |
| 2033 | } |
| 2034 | |
| 2035 | if (rc == nic_data->warm_boot_count) |
| 2036 | return 0; |
| 2037 | |
| 2038 | nic_data->warm_boot_count = rc; |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 2039 | efx_ef10_mcdi_reboot_detected(efx); |
Ben Hutchings | 869070c | 2013-09-05 22:46:10 +0100 | [diff] [blame] | 2040 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2041 | return -EIO; |
| 2042 | } |
| 2043 | |
| 2044 | /* Handle an MSI interrupt |
| 2045 | * |
| 2046 | * Handle an MSI hardware interrupt. This routine schedules event |
| 2047 | * queue processing. No interrupt acknowledgement cycle is necessary. |
| 2048 | * Also, we never need to check that the interrupt is for us, since |
| 2049 | * MSI interrupts cannot be shared. |
| 2050 | */ |
| 2051 | static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) |
| 2052 | { |
| 2053 | struct efx_msi_context *context = dev_id; |
| 2054 | struct efx_nic *efx = context->efx; |
| 2055 | |
| 2056 | netif_vdbg(efx, intr, efx->net_dev, |
| 2057 | "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); |
| 2058 | |
| 2059 | if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { |
| 2060 | /* Note test interrupts */ |
| 2061 | if (context->index == efx->irq_level) |
| 2062 | efx->last_irq_cpu = raw_smp_processor_id(); |
| 2063 | |
| 2064 | /* Schedule processing of the channel */ |
| 2065 | efx_schedule_channel_irq(efx->channel[context->index]); |
| 2066 | } |
| 2067 | |
| 2068 | return IRQ_HANDLED; |
| 2069 | } |
| 2070 | |
| 2071 | static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) |
| 2072 | { |
| 2073 | struct efx_nic *efx = dev_id; |
| 2074 | bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); |
| 2075 | struct efx_channel *channel; |
| 2076 | efx_dword_t reg; |
| 2077 | u32 queues; |
| 2078 | |
| 2079 | /* Read the ISR which also ACKs the interrupts */ |
| 2080 | efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); |
| 2081 | queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); |
| 2082 | |
| 2083 | if (queues == 0) |
| 2084 | return IRQ_NONE; |
| 2085 | |
| 2086 | if (likely(soft_enabled)) { |
| 2087 | /* Note test interrupts */ |
| 2088 | if (queues & (1U << efx->irq_level)) |
| 2089 | efx->last_irq_cpu = raw_smp_processor_id(); |
| 2090 | |
| 2091 | efx_for_each_channel(channel, efx) { |
| 2092 | if (queues & 1) |
| 2093 | efx_schedule_channel_irq(channel); |
| 2094 | queues >>= 1; |
| 2095 | } |
| 2096 | } |
| 2097 | |
| 2098 | netif_vdbg(efx, intr, efx->net_dev, |
| 2099 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", |
| 2100 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); |
| 2101 | |
| 2102 | return IRQ_HANDLED; |
| 2103 | } |
| 2104 | |
Jon Cooper | 942e298 | 2016-08-26 15:13:30 +0100 | [diff] [blame] | 2105 | static int efx_ef10_irq_test_generate(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2106 | { |
| 2107 | MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); |
| 2108 | |
Jon Cooper | 942e298 | 2016-08-26 15:13:30 +0100 | [diff] [blame] | 2109 | if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, |
| 2110 | NULL) == 0) |
| 2111 | return -ENOTSUPP; |
| 2112 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2113 | BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); |
| 2114 | |
| 2115 | MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); |
Jon Cooper | 942e298 | 2016-08-26 15:13:30 +0100 | [diff] [blame] | 2116 | return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2117 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 2118 | } |
| 2119 | |
| 2120 | static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) |
| 2121 | { |
| 2122 | return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, |
| 2123 | (tx_queue->ptr_mask + 1) * |
| 2124 | sizeof(efx_qword_t), |
| 2125 | GFP_KERNEL); |
| 2126 | } |
| 2127 | |
| 2128 | /* This writes to the TX_DESC_WPTR and also pushes data */ |
| 2129 | static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, |
| 2130 | const efx_qword_t *txd) |
| 2131 | { |
| 2132 | unsigned int write_ptr; |
| 2133 | efx_oword_t reg; |
| 2134 | |
| 2135 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| 2136 | EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); |
| 2137 | reg.qword[0] = *txd; |
| 2138 | efx_writeo_page(tx_queue->efx, ®, |
| 2139 | ER_DZ_TX_DESC_UPD, tx_queue->queue); |
| 2140 | } |
| 2141 | |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 2142 | /* Add Firmware-Assisted TSO v2 option descriptors to a queue: one carrying the |
| 2143 | * initial IPv4 ID and TCP sequence number and one carrying the MSS. */ |
| 2144 | static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, |
| 2145 | struct sk_buff *skb, |
| 2146 | bool *data_mapped) |
| 2147 | { |
| 2148 | struct efx_tx_buffer *buffer; |
| 2149 | struct tcphdr *tcp; |
| 2150 | struct iphdr *ip; |
| 2151 | |
| 2152 | u16 ipv4_id; |
| 2153 | u32 seqnum; |
| 2154 | u32 mss; |
| 2155 | |
Edward Cree | e01b16a | 2016-12-02 15:51:33 +0000 | [diff] [blame] | 2156 | EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 2157 | |
| 2158 | mss = skb_shinfo(skb)->gso_size; |
| 2159 | |
| 2160 | if (unlikely(mss < 4)) { |
| 2161 | WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); |
| 2162 | return -EINVAL; |
| 2163 | } |
| 2164 | |
| 2165 | ip = ip_hdr(skb); |
| 2166 | if (ip->version == 4) { |
| 2167 | /* Modify IPv4 header if needed. */ |
| 2168 | ip->tot_len = 0; |
| 2169 | ip->check = 0; |
| 2170 | ipv4_id = ip->id; |
| 2171 | } else { |
| 2172 | /* Modify IPv6 header if needed. */ |
| 2173 | struct ipv6hdr *ipv6 = ipv6_hdr(skb); |
| 2174 | |
| 2175 | ipv6->payload_len = 0; |
| 2176 | ipv4_id = 0; |
| 2177 | } |
| 2178 | |
| 2179 | tcp = tcp_hdr(skb); |
| 2180 | seqnum = ntohl(tcp->seq); |
| 2181 | |
| 2182 | buffer = efx_tx_queue_get_insert_buffer(tx_queue); |
| 2183 | |
| 2184 | buffer->flags = EFX_TX_BUF_OPTION; |
| 2185 | buffer->len = 0; |
| 2186 | buffer->unmap_len = 0; |
| 2187 | EFX_POPULATE_QWORD_5(buffer->option, |
| 2188 | ESF_DZ_TX_DESC_IS_OPT, 1, |
| 2189 | ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, |
| 2190 | ESF_DZ_TX_TSO_OPTION_TYPE, |
| 2191 | ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, |
| 2192 | ESF_DZ_TX_TSO_IP_ID, ipv4_id, |
| 2193 | ESF_DZ_TX_TSO_TCP_SEQNO, seqnum |
| 2194 | ); |
| 2195 | ++tx_queue->insert_count; |
| 2196 | |
| 2197 | buffer = efx_tx_queue_get_insert_buffer(tx_queue); |
| 2198 | |
| 2199 | buffer->flags = EFX_TX_BUF_OPTION; |
| 2200 | buffer->len = 0; |
| 2201 | buffer->unmap_len = 0; |
| 2202 | EFX_POPULATE_QWORD_4(buffer->option, |
| 2203 | ESF_DZ_TX_DESC_IS_OPT, 1, |
| 2204 | ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, |
| 2205 | ESF_DZ_TX_TSO_OPTION_TYPE, |
| 2206 | ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, |
| 2207 | ESF_DZ_TX_TSO_TCP_MSS, mss |
| 2208 | ); |
| 2209 | ++tx_queue->insert_count; |
| 2210 | |
| 2211 | return 0; |
| 2212 | } |
| 2213 | |
Edward Cree | 46d1efd | 2016-11-17 10:52:36 +0000 | [diff] [blame] | 2214 | static u32 efx_ef10_tso_versions(struct efx_nic *efx) |
| 2215 | { |
| 2216 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2217 | u32 tso_versions = 0; |
| 2218 | |
| 2219 | if (nic_data->datapath_caps & |
| 2220 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) |
| 2221 | tso_versions |= BIT(1); |
| 2222 | if (nic_data->datapath_caps2 & |
| 2223 | (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) |
| 2224 | tso_versions |= BIT(2); |
| 2225 | return tso_versions; |
| 2226 | } |
| 2227 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2228 | static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) |
| 2229 | { |
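| | /* Each TX descriptor is 8 bytes, so a maximum-size ring needs |
| | * EFX_MAX_DMAQ_SIZE * 8 / EFX_BUF_SIZE buffer-table chunks, which bounds |
| | * the INIT_TXQ request length declared below. |
| | */ |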
| 2230 | MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
| 2231 | EFX_BUF_SIZE)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2232 | bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; |
| 2233 | size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; |
| 2234 | struct efx_channel *channel = tx_queue->channel; |
| 2235 | struct efx_nic *efx = tx_queue->efx; |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2236 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 2237 | bool tso_v2 = false; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2238 | size_t inlen; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2239 | dma_addr_t dma_addr; |
| 2240 | efx_qword_t *txd; |
| 2241 | int rc; |
| 2242 | int i; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2243 | BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2244 | |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 2245 | /* Only a limited number of TSOv2 contexts are available, so TSOv2 can only |
| 2246 | * be configured on some of the queues. TSO without checksum offload is of |
| 2247 | * little use, so we only enable TSOv2 on queues that do checksum offload. |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 2248 | */ |
| 2249 | if (csum_offload && (nic_data->datapath_caps2 & |
| 2250 | (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) { |
| 2251 | tso_v2 = true; |
| 2252 | netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", |
| 2253 | channel->channel); |
| 2254 | } |
| 2255 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2256 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); |
| 2257 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); |
| 2258 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); |
| 2259 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2260 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2261 | MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2262 | |
| 2263 | dma_addr = tx_queue->txd.buf.dma_addr; |
| 2264 | |
| 2265 | netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", |
| 2266 | tx_queue->queue, entries, (u64)dma_addr); |
| 2267 | |
| 2268 | for (i = 0; i < entries; ++i) { |
| 2269 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); |
| 2270 | dma_addr += EFX_BUF_SIZE; |
| 2271 | } |
| 2272 | |
| 2273 | inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); |
| 2274 | |
Edward Cree | e638ee1 | 2016-11-17 10:52:07 +0000 | [diff] [blame] | 2275 | do { |
| 2276 | MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS, |
| 2277 | /* This flag was removed from mcdi_pcol.h for |
| 2278 | * the non-_EXT version of INIT_TXQ. However, |
| 2279 | * firmware still honours it. |
| 2280 | */ |
| 2281 | INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2, |
| 2282 | INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, |
| 2283 | INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); |
| 2284 | |
| 2285 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen, |
| 2286 | NULL, 0, NULL); |
| 2287 | if (rc == -ENOSPC && tso_v2) { |
| 2288 | /* Retry without TSOv2 if we're short on contexts. */ |
| 2289 | tso_v2 = false; |
| 2290 | netif_warn(efx, probe, efx->net_dev, |
| 2291 | "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n"); |
| 2292 | } else if (rc) { |
| 2293 | efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ, |
| 2294 | MC_CMD_INIT_TXQ_EXT_IN_LEN, |
| 2295 | NULL, 0, rc); |
| 2296 | goto fail; |
| 2297 | } |
| 2298 | } while (rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2299 | |
| 2300 | /* A previous user of this TX queue might have set us up the |
| 2301 | * bomb by writing a descriptor to the TX push collector but |
| 2302 | * not the doorbell. (Each collector belongs to a port, not a |
| 2303 | * queue or function, so cannot easily be reset.) We must |
| 2304 | * attempt to push a no-op descriptor in its place. |
| 2305 | */ |
| 2306 | tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; |
| 2307 | tx_queue->insert_count = 1; |
| 2308 | txd = efx_tx_desc(tx_queue, 0); |
| 2309 | EFX_POPULATE_QWORD_4(*txd, |
| 2310 | ESF_DZ_TX_DESC_IS_OPT, true, |
| 2311 | ESF_DZ_TX_OPTION_TYPE, |
| 2312 | ESE_DZ_TX_OPTION_DESC_CRC_CSUM, |
| 2313 | ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, |
| 2314 | ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); |
| 2315 | tx_queue->write_count = 1; |
Bert Kenward | 93171b1 | 2015-11-30 09:05:35 +0000 | [diff] [blame] | 2316 | |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 2317 | if (tso_v2) { |
| 2318 | tx_queue->handle_tso = efx_ef10_tx_tso_desc; |
| 2319 | tx_queue->tso_version = 2; |
| 2320 | } else if (nic_data->datapath_caps & |
| 2321 | (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { |
Bert Kenward | 93171b1 | 2015-11-30 09:05:35 +0000 | [diff] [blame] | 2322 | tx_queue->tso_version = 1; |
| 2323 | } |
| 2324 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2325 | wmb(); |
| 2326 | efx_ef10_push_tx_desc(tx_queue, txd); |
| 2327 | |
| 2328 | return; |
| 2329 | |
| 2330 | fail: |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 2331 | netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", |
| 2332 | tx_queue->queue); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2333 | } |
| 2334 | |
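/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the buffer-table arithmetic used by the queue init paths above:
 * the descriptor ring lives in one DMA-coherent buffer, and the MC command
 * carries one 64-bit address per EFX_BUF_SIZE chunk of it.  The 4096-byte
 * chunk size and the sketch_* names are assumptions for illustration only.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_BUF_SIZE 4096u	/* stand-in for the driver's EFX_BUF_SIZE */

static size_t sketch_fill_dma_table(uint64_t dma_addr, size_t ring_bytes,
				    uint64_t *table, size_t table_max)
{
	size_t entries = ring_bytes / SKETCH_BUF_SIZE;	/* as in tx/rx/ev init */
	size_t i;

	if (entries > table_max)
		entries = table_max;
	for (i = 0; i < entries; i++) {
		table[i] = dma_addr;		/* MCDI_SET_ARRAY_QWORD(..., i, dma_addr) */
		dma_addr += SKETCH_BUF_SIZE;	/* next chunk of the same buffer */
	}
	return entries;
}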
| 2335 | static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) |
| 2336 | { |
| 2337 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2338 | MCDI_DECLARE_BUF_ERR(outbuf); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2339 | struct efx_nic *efx = tx_queue->efx; |
| 2340 | size_t outlen; |
| 2341 | int rc; |
| 2342 | |
| 2343 | MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, |
| 2344 | tx_queue->queue); |
| 2345 | |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 2346 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2347 | outbuf, sizeof(outbuf), &outlen); |
| 2348 | |
| 2349 | if (rc && rc != -EALREADY) |
| 2350 | goto fail; |
| 2351 | |
| 2352 | return; |
| 2353 | |
| 2354 | fail: |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 2355 | efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, |
| 2356 | outbuf, outlen, rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2357 | } |
| 2358 | |
| 2359 | static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) |
| 2360 | { |
| 2361 | efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); |
| 2362 | } |
| 2363 | |
| 2364 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ |
| 2365 | static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) |
| 2366 | { |
| 2367 | unsigned int write_ptr; |
| 2368 | efx_dword_t reg; |
| 2369 | |
| 2370 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| 2371 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); |
| 2372 | efx_writed_page(tx_queue->efx, ®, |
| 2373 | ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); |
| 2374 | } |
| 2375 | |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 2376 | #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff |
| 2377 | |
| 2378 | static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, |
| 2379 | dma_addr_t dma_addr, unsigned int len) |
| 2380 | { |
| 2381 | if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { |
| 2382 | /* If we need to break across multiple descriptors we should |
| 2383 | * stop at a page boundary. This assumes the length limit is |
| 2384 | * greater than the page size. |
| 2385 | */ |
| 2386 | dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; |
| 2387 | |
| 2388 | BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); |
| 2389 | len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; |
| 2390 | } |
| 2391 | |
| 2392 | return len; |
| 2393 | } |
| 2394 | |
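/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the rule implemented by efx_ef10_tx_limit_len() above: a fragment
 * longer than the 0x3fff per-descriptor limit is cut so that the first
 * descriptor ends on a page boundary.  The 4096-byte page size and the
 * sketch_* name are assumptions for illustration only.
 */
#include <stdint.h>

#define SKETCH_MAX_DESC_LEN 0x3fffu
#define SKETCH_PAGE_SIZE 4096u

static unsigned int sketch_limit_len(uint64_t dma_addr, unsigned int len)
{
	if (len > SKETCH_MAX_DESC_LEN) {
		/* Round the provisional end address down to a page boundary,
		 * then convert it back into a length from dma_addr.
		 */
		uint64_t end = dma_addr + SKETCH_MAX_DESC_LEN;

		len = (unsigned int)((end & ~(uint64_t)(SKETCH_PAGE_SIZE - 1)) - dma_addr);
	}
	return len;
}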
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2395 | static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) |
| 2396 | { |
| 2397 | unsigned int old_write_count = tx_queue->write_count; |
| 2398 | struct efx_tx_buffer *buffer; |
| 2399 | unsigned int write_ptr; |
| 2400 | efx_qword_t *txd; |
| 2401 | |
Martin Habets | b2663a4 | 2015-11-02 12:51:31 +0000 | [diff] [blame] | 2402 | tx_queue->xmit_more_available = false; |
| 2403 | if (unlikely(tx_queue->write_count == tx_queue->insert_count)) |
| 2404 | return; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2405 | |
| 2406 | do { |
| 2407 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
| 2408 | buffer = &tx_queue->buffer[write_ptr]; |
| 2409 | txd = efx_tx_desc(tx_queue, write_ptr); |
| 2410 | ++tx_queue->write_count; |
| 2411 | |
| 2412 | /* Create TX descriptor ring entry */ |
| 2413 | if (buffer->flags & EFX_TX_BUF_OPTION) { |
| 2414 | *txd = buffer->option; |
Edward Cree | de1deff | 2017-01-13 21:20:14 +0000 | [diff] [blame] | 2415 | if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) |
| 2416 | /* PIO descriptor */ |
| 2417 | tx_queue->packet_write_count = tx_queue->write_count; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2418 | } else { |
Edward Cree | de1deff | 2017-01-13 21:20:14 +0000 | [diff] [blame] | 2419 | tx_queue->packet_write_count = tx_queue->write_count; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2420 | BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); |
| 2421 | EFX_POPULATE_QWORD_3( |
| 2422 | *txd, |
| 2423 | ESF_DZ_TX_KER_CONT, |
| 2424 | buffer->flags & EFX_TX_BUF_CONT, |
| 2425 | ESF_DZ_TX_KER_BYTE_CNT, buffer->len, |
| 2426 | ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); |
| 2427 | } |
| 2428 | } while (tx_queue->write_count != tx_queue->insert_count); |
| 2429 | |
| 2430 | wmb(); /* Ensure descriptors are written before they are fetched */ |
| 2431 | |
| 2432 | if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { |
| 2433 | txd = efx_tx_desc(tx_queue, |
| 2434 | old_write_count & tx_queue->ptr_mask); |
| 2435 | efx_ef10_push_tx_desc(tx_queue, txd); |
| 2436 | ++tx_queue->pushes; |
| 2437 | } else { |
| 2438 | efx_ef10_notify_tx_desc(tx_queue); |
| 2439 | } |
| 2440 | } |
| 2441 | |
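/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the ring indexing used by efx_ef10_tx_write() and the other queue
 * code above: insert/write/read counts are free-running 32-bit counters, and
 * the ring slot is always "count & ptr_mask" with the ring size a power of
 * two, so wrap-around needs no special handling.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned int ring_size = 512;	/* power of two, like the TXQ */
	const unsigned int ptr_mask = ring_size - 1;
	uint32_t write_count = UINT32_MAX - 1;	/* about to wrap */

	assert((write_count & ptr_mask) == 510);
	write_count += 4;			/* wraps through zero */
	assert((write_count & ptr_mask) == 2);
	return 0;
}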
Edward Cree | a33a4c7 | 2016-11-03 22:12:27 +0000 | [diff] [blame] | 2442 | #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\ |
| 2443 | 1 << RSS_MODE_HASH_DST_ADDR_LBN) |
| 2444 | #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\ |
| 2445 | 1 << RSS_MODE_HASH_DST_PORT_LBN) |
| 2446 | #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\ |
| 2447 | 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\ |
| 2448 | 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\ |
| 2449 | 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\ |
| 2450 | (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\ |
| 2451 | RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\ |
| 2452 | RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\ |
| 2453 | (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\ |
| 2454 | RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ |
| 2455 | RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) |
| 2456 | |
| 2457 | static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags) |
| 2458 | { |
| 2459 | /* Firmware had a bug (sfc bug 61952) where it would not actually |
| 2460 | * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS. |
| 2461 | * This meant that it would always contain whatever was previously |
| 2462 | * in the MCDI buffer. Fortunately, all firmware versions with |
| 2463 | * this bug have the same default flags value for a newly-allocated |
| 2464 | * RSS context, and the only time we want to get the flags is just |
| 2465 | * after allocating. Moreover, the response has a 32-bit hole |
| 2466 | * where the context ID would be in the request, so we can use an |
| 2467 | * overlength buffer in the request and pre-fill the flags field |
| 2468 | * with what we believe the default to be. Thus if the firmware |
| 2469 | * has the bug, it will leave our pre-filled value in the flags |
| 2470 | * field of the response, and we will get the right answer. |
| 2471 | * |
| 2472 | * However, this does mean that this function should NOT be used if |
| 2473 | * the RSS context flags might not be their defaults - it is ONLY |
| 2474 | * reliably correct for a newly-allocated RSS context. |
| 2475 | */ |
| 2476 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); |
| 2477 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); |
| 2478 | size_t outlen; |
| 2479 | int rc; |
| 2480 | |
| 2481 | /* Check we have a hole for the context ID */ |
| 2482 | BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST); |
| 2483 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context); |
| 2484 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS, |
| 2485 | RSS_CONTEXT_FLAGS_DEFAULT); |
| 2486 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf, |
| 2487 | sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); |
| 2488 | if (rc == 0) { |
| 2489 | if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN) |
| 2490 | rc = -EIO; |
| 2491 | else |
| 2492 | *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS); |
| 2493 | } |
| 2494 | return rc; |
| 2495 | } |
| 2496 | |
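/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the trick described in efx_ef10_get_rss_flags() above: the
 * request buffer is over-length, and the spot where the response's flags word
 * will land is pre-filled with the believed default, so firmware that fails to
 * write that word still leaves a usable value behind.  The buffer layout,
 * default value and sketch_* names are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static uint32_t sketch_get_flags(bool fw_writes_flags)
{
	/* One buffer stands in for the shared MCDI request/response area;
	 * bytes 4..7 are both the request's "hole" and the response's flags.
	 */
	uint8_t buf[8] = { 0 };
	uint32_t flags = 0x000000ffu;		/* believed default */

	memcpy(&buf[4], &flags, sizeof(flags));	/* pre-fill the hole */
	if (fw_writes_flags) {			/* fixed firmware overwrites it */
		uint32_t real = 0x000000ffu;
		memcpy(&buf[4], &real, sizeof(real));
	}
	memcpy(&flags, &buf[4], sizeof(flags));	/* read the "response" */
	return flags;				/* usable value either way */
}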
| 2497 | /* Attempt to enable 4-tuple UDP hashing on the specified RSS context. |
| 2498 | * If we fail, we just leave the RSS context at its default hash settings, |
| 2499 | * which is safe but may slightly reduce performance. |
| 2500 | * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we |
| 2501 | * just need to set the UDP ports flags (for both IP versions). |
| 2502 | */ |
| 2503 | static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context) |
| 2504 | { |
| 2505 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); |
| 2506 | u32 flags; |
| 2507 | |
| 2508 | BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0); |
| 2509 | |
| 2510 | if (efx_ef10_get_rss_flags(efx, context, &flags) != 0) |
| 2511 | return; |
| 2512 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context); |
| 2513 | flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; |
| 2514 | flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; |
| 2515 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags); |
Edward Cree | b718c88 | 2016-11-03 22:12:58 +0000 | [diff] [blame] | 2516 | if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf), |
| 2517 | NULL, 0, NULL)) |
| 2518 | /* Succeeded, so UDP 4-tuple is now enabled */ |
| 2519 | efx->rx_hash_udp_4tuple = true; |
Edward Cree | a33a4c7 | 2016-11-03 22:12:27 +0000 | [diff] [blame] | 2520 | } |
| 2521 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2522 | static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, |
| 2523 | bool exclusive, unsigned *context_size) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2524 | { |
| 2525 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); |
| 2526 | MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2527 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2528 | size_t outlen; |
| 2529 | int rc; |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2530 | u32 alloc_type = exclusive ? |
| 2531 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : |
| 2532 | MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; |
| 2533 | unsigned rss_spread = exclusive ? |
| 2534 | efx->rss_spread : |
| 2535 | min(rounddown_pow_of_two(efx->rss_spread), |
| 2536 | EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); |
| 2537 | |
| 2538 | if (!exclusive && rss_spread == 1) { |
| 2539 | *context = EFX_EF10_RSS_CONTEXT_INVALID; |
| 2540 | if (context_size) |
| 2541 | *context_size = 1; |
| 2542 | return 0; |
| 2543 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2544 | |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 2545 | if (nic_data->datapath_caps & |
| 2546 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) |
| 2547 | return -EOPNOTSUPP; |
| 2548 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2549 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2550 | nic_data->vport_id); |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2551 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); |
| 2552 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2553 | |
| 2554 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), |
| 2555 | outbuf, sizeof(outbuf), &outlen); |
| 2556 | if (rc != 0) |
| 2557 | return rc; |
| 2558 | |
| 2559 | if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) |
| 2560 | return -EIO; |
| 2561 | |
| 2562 | *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); |
| 2563 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2564 | if (context_size) |
| 2565 | *context_size = rss_spread; |
| 2566 | |
Edward Cree | a33a4c7 | 2016-11-03 22:12:27 +0000 | [diff] [blame] | 2567 | if (nic_data->datapath_caps & |
| 2568 | 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN) |
| 2569 | efx_ef10_set_rss_flags(efx, *context); |
| 2570 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2571 | return 0; |
| 2572 | } |
| 2573 | |
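/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the queue count chosen for a shared RSS context in
 * efx_ef10_alloc_rss_context() above: the RSS spread is rounded down to a
 * power of two and capped at the maximum shared context size (64 here).
 * sketch_* names are illustrative assumptions.
 */
static unsigned int sketch_rounddown_pow_of_two(unsigned int n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear the lowest set bit until one remains */
	return n;
}

static unsigned int sketch_shared_rss_spread(unsigned int rss_spread)
{
	unsigned int n = sketch_rounddown_pow_of_two(rss_spread);

	return n < 64 ? n : 64;	/* EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE */
}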
| 2574 | static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) |
| 2575 | { |
| 2576 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); |
| 2577 | int rc; |
| 2578 | |
| 2579 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, |
| 2580 | context); |
| 2581 | |
| 2582 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), |
| 2583 | NULL, 0, NULL); |
| 2584 | WARN_ON(rc != 0); |
| 2585 | } |
| 2586 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2587 | static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2588 | const u32 *rx_indir_table, const u8 *key) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2589 | { |
| 2590 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); |
| 2591 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); |
| 2592 | int i, rc; |
| 2593 | |
| 2594 | MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, |
| 2595 | context); |
| 2596 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
| 2597 | MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); |
| 2598 | |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2599 | /* This iterates over the length of efx->rx_indir_table, but copies |
| 2600 | * bytes from rx_indir_table. That's because the latter is a pointer |
| 2601 | * rather than an array, but should have the same length. |
| 2602 | * The efx->rx_hash_key loop below is similar. |
| 2603 | */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2604 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) |
| 2605 | MCDI_PTR(tablebuf, |
| 2606 | RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2607 | (u8) rx_indir_table[i]; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2608 | |
| 2609 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, |
| 2610 | sizeof(tablebuf), NULL, 0, NULL); |
| 2611 | if (rc != 0) |
| 2612 | return rc; |
| 2613 | |
| 2614 | MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, |
| 2615 | context); |
| 2616 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != |
| 2617 | MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); |
| 2618 | for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2619 | MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2620 | |
| 2621 | return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, |
| 2622 | sizeof(keybuf), NULL, 0, NULL); |
| 2623 | } |
| 2624 | |
| 2625 | static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) |
| 2626 | { |
| 2627 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2628 | |
| 2629 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) |
| 2630 | efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); |
| 2631 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| 2632 | } |
| 2633 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2634 | static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, |
| 2635 | unsigned *context_size) |
| 2636 | { |
| 2637 | u32 new_rx_rss_context; |
| 2638 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2639 | int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, |
| 2640 | false, context_size); |
| 2641 | |
| 2642 | if (rc != 0) |
| 2643 | return rc; |
| 2644 | |
| 2645 | nic_data->rx_rss_context = new_rx_rss_context; |
| 2646 | nic_data->rx_rss_context_exclusive = false; |
| 2647 | efx_set_default_rx_indir_table(efx); |
| 2648 | return 0; |
| 2649 | } |
| 2650 | |
| 2651 | static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2652 | const u32 *rx_indir_table, |
| 2653 | const u8 *key) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2654 | { |
| 2655 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2656 | int rc; |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2657 | u32 new_rx_rss_context; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2658 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2659 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || |
| 2660 | !nic_data->rx_rss_context_exclusive) { |
| 2661 | rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, |
| 2662 | true, NULL); |
| 2663 | if (rc == -EOPNOTSUPP) |
| 2664 | return rc; |
| 2665 | else if (rc != 0) |
| 2666 | goto fail1; |
| 2667 | } else { |
| 2668 | new_rx_rss_context = nic_data->rx_rss_context; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2669 | } |
| 2670 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2671 | rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2672 | rx_indir_table, key); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2673 | if (rc != 0) |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2674 | goto fail2; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2675 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2676 | if (nic_data->rx_rss_context != new_rx_rss_context) |
| 2677 | efx_ef10_rx_free_indir_table(efx); |
| 2678 | nic_data->rx_rss_context = new_rx_rss_context; |
| 2679 | nic_data->rx_rss_context_exclusive = true; |
| 2680 | if (rx_indir_table != efx->rx_indir_table) |
| 2681 | memcpy(efx->rx_indir_table, rx_indir_table, |
| 2682 | sizeof(efx->rx_indir_table)); |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2683 | if (key != efx->rx_hash_key) |
| 2684 | memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); |
| 2685 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2686 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2687 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2688 | fail2: |
| 2689 | if (new_rx_rss_context != nic_data->rx_rss_context) |
| 2690 | efx_ef10_free_rss_context(efx, new_rx_rss_context); |
| 2691 | fail1: |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2692 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2693 | return rc; |
| 2694 | } |
| 2695 | |
Edward Cree | a707d18 | 2017-01-17 12:02:12 +0000 | [diff] [blame] | 2696 | static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) |
| 2697 | { |
| 2698 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2699 | MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); |
| 2700 | MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); |
| 2701 | MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); |
| 2702 | size_t outlen; |
| 2703 | int rc, i; |
| 2704 | |
| 2705 | BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != |
| 2706 | MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); |
| 2707 | |
| 2708 | if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) |
| 2709 | return -ENOENT; |
| 2710 | |
| 2711 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, |
| 2712 | nic_data->rx_rss_context); |
| 2713 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
| 2714 | MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); |
| 2715 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), |
| 2716 | tablebuf, sizeof(tablebuf), &outlen); |
| 2717 | if (rc != 0) |
| 2718 | return rc; |
| 2719 | |
| 2720 | if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) |
| 2721 | return -EIO; |
| 2722 | |
| 2723 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) |
| 2724 | efx->rx_indir_table[i] = MCDI_PTR(tablebuf, |
| 2725 | RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; |
| 2726 | |
| 2727 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, |
| 2728 | nic_data->rx_rss_context); |
| 2729 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != |
| 2730 | MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); |
| 2731 | rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), |
| 2732 | keybuf, sizeof(keybuf), &outlen); |
| 2733 | if (rc != 0) |
| 2734 | return rc; |
| 2735 | |
| 2736 | if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) |
| 2737 | return -EIO; |
| 2738 | |
| 2739 | for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) |
| 2740 | efx->rx_hash_key[i] = MCDI_PTR( |
| 2741 | keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; |
| 2742 | |
| 2743 | return 0; |
| 2744 | } |
| 2745 | |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2746 | static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2747 | const u32 *rx_indir_table, |
| 2748 | const u8 *key) |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2749 | { |
| 2750 | int rc; |
| 2751 | |
| 2752 | if (efx->rss_spread == 1) |
| 2753 | return 0; |
| 2754 | |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2755 | if (!key) |
| 2756 | key = efx->rx_hash_key; |
| 2757 | |
| 2758 | rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2759 | |
| 2760 | if (rc == -ENOBUFS && !user) { |
| 2761 | unsigned context_size; |
| 2762 | bool mismatch = false; |
| 2763 | size_t i; |
| 2764 | |
| 2765 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; |
| 2766 | i++) |
| 2767 | mismatch = rx_indir_table[i] != |
| 2768 | ethtool_rxfh_indir_default(i, efx->rss_spread); |
| 2769 | |
| 2770 | rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); |
| 2771 | if (rc == 0) { |
| 2772 | if (context_size != efx->rss_spread) |
| 2773 | netif_warn(efx, probe, efx->net_dev, |
| 2774 | "Could not allocate an exclusive RSS" |
| 2775 | " context; allocated a shared one of" |
| 2776 | " different size." |
| 2777 | " Wanted %u, got %u.\n", |
| 2778 | efx->rss_spread, context_size); |
| 2779 | else if (mismatch) |
| 2780 | netif_warn(efx, probe, efx->net_dev, |
| 2781 | "Could not allocate an exclusive RSS" |
| 2782 | " context; allocated a shared one but" |
| 2783 | " could not apply custom" |
| 2784 | " indirection.\n"); |
| 2785 | else |
| 2786 | netif_info(efx, probe, efx->net_dev, |
| 2787 | "Could not allocate an exclusive RSS" |
| 2788 | " context; allocated a shared one.\n"); |
| 2789 | } |
| 2790 | } |
| 2791 | return rc; |
| 2792 | } |
| 2793 | |
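/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the "mismatch" test in efx_ef10_pf_rx_push_rss_config() above:
 * before falling back to a shared RSS context, the requested indirection table
 * is compared against the default spreading, which (as with
 * ethtool_rxfh_indir_default()) assigns entry i to queue i modulo the RSS
 * spread.  sketch_* names are illustrative assumptions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool sketch_indir_table_is_default(const uint32_t *table, size_t len,
					  unsigned int rss_spread)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (table[i] != i % rss_spread)
			return false;
	return true;
}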
| 2794 | static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, |
| 2795 | const u32 *rx_indir_table |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 2796 | __attribute__ ((unused)), |
| 2797 | const u8 *key |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 2798 | __attribute__ ((unused))) |
| 2799 | { |
| 2800 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 2801 | |
| 2802 | if (user) |
| 2803 | return -EOPNOTSUPP; |
| 2804 | if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) |
| 2805 | return 0; |
| 2806 | return efx_ef10_rx_push_shared_rss_config(efx, NULL); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2807 | } |
| 2808 | |
| 2809 | static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) |
| 2810 | { |
| 2811 | return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, |
| 2812 | (rx_queue->ptr_mask + 1) * |
| 2813 | sizeof(efx_qword_t), |
| 2814 | GFP_KERNEL); |
| 2815 | } |
| 2816 | |
| 2817 | static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) |
| 2818 | { |
| 2819 | MCDI_DECLARE_BUF(inbuf, |
| 2820 | MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / |
| 2821 | EFX_BUF_SIZE)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2822 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
| 2823 | size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; |
| 2824 | struct efx_nic *efx = rx_queue->efx; |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2825 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2826 | size_t inlen; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2827 | dma_addr_t dma_addr; |
| 2828 | int rc; |
| 2829 | int i; |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2830 | BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2831 | |
| 2832 | rx_queue->scatter_n = 0; |
| 2833 | rx_queue->scatter_len = 0; |
| 2834 | |
| 2835 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); |
| 2836 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); |
| 2837 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); |
| 2838 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, |
| 2839 | efx_rx_queue_index(rx_queue)); |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 2840 | MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, |
| 2841 | INIT_RXQ_IN_FLAG_PREFIX, 1, |
| 2842 | INIT_RXQ_IN_FLAG_TIMESTAMP, 1); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2843 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 2844 | MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2845 | |
| 2846 | dma_addr = rx_queue->rxd.buf.dma_addr; |
| 2847 | |
| 2848 | netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n", |
| 2849 | efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); |
| 2850 | |
| 2851 | for (i = 0; i < entries; ++i) { |
| 2852 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); |
| 2853 | dma_addr += EFX_BUF_SIZE; |
| 2854 | } |
| 2855 | |
| 2856 | inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); |
| 2857 | |
| 2858 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2859 | NULL, 0, NULL); |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 2860 | if (rc) |
| 2861 | netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", |
| 2862 | efx_rx_queue_index(rx_queue)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2863 | } |
| 2864 | |
| 2865 | static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) |
| 2866 | { |
| 2867 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); |
Jon Cooper | aa09a3d | 2015-05-20 11:10:41 +0100 | [diff] [blame] | 2868 | MCDI_DECLARE_BUF_ERR(outbuf); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2869 | struct efx_nic *efx = rx_queue->efx; |
| 2870 | size_t outlen; |
| 2871 | int rc; |
| 2872 | |
| 2873 | MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, |
| 2874 | efx_rx_queue_index(rx_queue)); |
| 2875 | |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 2876 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2877 | outbuf, sizeof(outbuf), &outlen); |
| 2878 | |
| 2879 | if (rc && rc != -EALREADY) |
| 2880 | goto fail; |
| 2881 | |
| 2882 | return; |
| 2883 | |
| 2884 | fail: |
Edward Cree | 1e0b812 | 2013-05-31 18:36:12 +0100 | [diff] [blame] | 2885 | efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, |
| 2886 | outbuf, outlen, rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2887 | } |
| 2888 | |
| 2889 | static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) |
| 2890 | { |
| 2891 | efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); |
| 2892 | } |
| 2893 | |
| 2894 | /* This creates an entry in the RX descriptor queue */ |
| 2895 | static inline void |
| 2896 | efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) |
| 2897 | { |
| 2898 | struct efx_rx_buffer *rx_buf; |
| 2899 | efx_qword_t *rxd; |
| 2900 | |
| 2901 | rxd = efx_rx_desc(rx_queue, index); |
| 2902 | rx_buf = efx_rx_buffer(rx_queue, index); |
| 2903 | EFX_POPULATE_QWORD_2(*rxd, |
| 2904 | ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, |
| 2905 | ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); |
| 2906 | } |
| 2907 | |
| 2908 | static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) |
| 2909 | { |
| 2910 | struct efx_nic *efx = rx_queue->efx; |
| 2911 | unsigned int write_count; |
| 2912 | efx_dword_t reg; |
| 2913 | |
| 2914 | /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ |
| 2915 | write_count = rx_queue->added_count & ~7; |
| 2916 | if (rx_queue->notified_count == write_count) |
| 2917 | return; |
| 2918 | |
| 2919 | do |
| 2920 | efx_ef10_build_rx_desc( |
| 2921 | rx_queue, |
| 2922 | rx_queue->notified_count & rx_queue->ptr_mask); |
| 2923 | while (++rx_queue->notified_count != write_count); |
| 2924 | |
| 2925 | wmb(); |
| 2926 | EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, |
| 2927 | write_count & rx_queue->ptr_mask); |
| 2928 | efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, |
| 2929 | efx_rx_queue_index(rx_queue)); |
| 2930 | } |
| 2931 | |
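/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the doorbell rule in efx_ef10_rx_write() above: the hardware
 * requires RX_DESC_WPTR to advance in multiples of 8, so only added_count
 * rounded down to a multiple of 8 is published and any remainder waits for a
 * later refill.  sketch_* names are illustrative assumptions.
 */
#include <stdint.h>

static unsigned int sketch_rx_publishable(uint32_t added_count,
					  uint32_t notified_count)
{
	uint32_t write_count = added_count & ~7u;	/* multiple of 8 */

	return write_count - notified_count;	/* descriptors pushed this time */
}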
| 2932 | static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; |
| 2933 | |
| 2934 | static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) |
| 2935 | { |
| 2936 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
| 2937 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); |
| 2938 | efx_qword_t event; |
| 2939 | |
| 2940 | EFX_POPULATE_QWORD_2(event, |
| 2941 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, |
| 2942 | ESF_DZ_EV_DATA, EFX_EF10_REFILL); |
| 2943 | |
| 2944 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); |
| 2945 | |
| 2946 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has |
| 2947 | * already swapped the data to little-endian order. |
| 2948 | */ |
| 2949 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], |
| 2950 | sizeof(efx_qword_t)); |
| 2951 | |
| 2952 | efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, |
| 2953 | inbuf, sizeof(inbuf), 0, |
| 2954 | efx_ef10_rx_defer_refill_complete, 0); |
| 2955 | } |
| 2956 | |
| 2957 | static void |
| 2958 | efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, |
| 2959 | int rc, efx_dword_t *outbuf, |
| 2960 | size_t outlen_actual) |
| 2961 | { |
| 2962 | /* nothing to do */ |
| 2963 | } |
| 2964 | |
| 2965 | static int efx_ef10_ev_probe(struct efx_channel *channel) |
| 2966 | { |
| 2967 | return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, |
| 2968 | (channel->eventq_mask + 1) * |
| 2969 | sizeof(efx_qword_t), |
| 2970 | GFP_KERNEL); |
| 2971 | } |
| 2972 | |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 2973 | static void efx_ef10_ev_fini(struct efx_channel *channel) |
| 2974 | { |
| 2975 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); |
| 2976 | MCDI_DECLARE_BUF_ERR(outbuf); |
| 2977 | struct efx_nic *efx = channel->efx; |
| 2978 | size_t outlen; |
| 2979 | int rc; |
| 2980 | |
| 2981 | MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); |
| 2982 | |
| 2983 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), |
| 2984 | outbuf, sizeof(outbuf), &outlen); |
| 2985 | |
| 2986 | if (rc && rc != -EALREADY) |
| 2987 | goto fail; |
| 2988 | |
| 2989 | return; |
| 2990 | |
| 2991 | fail: |
| 2992 | efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, |
| 2993 | outbuf, outlen, rc); |
| 2994 | } |
| 2995 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 2996 | static int efx_ef10_ev_init(struct efx_channel *channel) |
| 2997 | { |
| 2998 | MCDI_DECLARE_BUF(inbuf, |
Bert Kenward | a995560 | 2016-08-11 13:01:54 +0100 | [diff] [blame] | 2999 | MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / |
| 3000 | EFX_BUF_SIZE)); |
| 3001 | MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3002 | size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; |
| 3003 | struct efx_nic *efx = channel->efx; |
| 3004 | struct efx_ef10_nic_data *nic_data; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3005 | size_t inlen, outlen; |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 3006 | unsigned int enabled, implemented; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3007 | dma_addr_t dma_addr; |
| 3008 | int rc; |
| 3009 | int i; |
| 3010 | |
| 3011 | nic_data = efx->nic_data; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3012 | |
| 3013 | /* Fill event queue with all ones (i.e. empty events) */ |
| 3014 | memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); |
| 3015 | |
| 3016 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); |
| 3017 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); |
| 3018 | /* INIT_EVQ expects index in vector table, not absolute */ |
| 3019 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3020 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, |
| 3021 | MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); |
| 3022 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); |
| 3023 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); |
| 3024 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, |
| 3025 | MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); |
| 3026 | MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); |
| 3027 | |
Bert Kenward | a995560 | 2016-08-11 13:01:54 +0100 | [diff] [blame] | 3028 | if (nic_data->datapath_caps2 & |
| 3029 | 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) { |
| 3030 | /* Use the new generic approach to specifying event queue |
| 3031 | * configuration, requesting lower latency or higher throughput. |
| 3032 | * The options that actually get used appear in the output. |
| 3033 | */ |
| 3034 | MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS, |
| 3035 | INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, |
| 3036 | INIT_EVQ_V2_IN_FLAG_TYPE, |
| 3037 | MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); |
| 3038 | } else { |
| 3039 | bool cut_thru = !(nic_data->datapath_caps & |
| 3040 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); |
| 3041 | |
| 3042 | MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, |
| 3043 | INIT_EVQ_IN_FLAG_INTERRUPTING, 1, |
| 3044 | INIT_EVQ_IN_FLAG_RX_MERGE, 1, |
| 3045 | INIT_EVQ_IN_FLAG_TX_MERGE, 1, |
| 3046 | INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru); |
| 3047 | } |
| 3048 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3049 | dma_addr = channel->eventq.buf.dma_addr; |
| 3050 | for (i = 0; i < entries; ++i) { |
| 3051 | MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); |
| 3052 | dma_addr += EFX_BUF_SIZE; |
| 3053 | } |
| 3054 | |
| 3055 | inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); |
| 3056 | |
| 3057 | rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, |
| 3058 | outbuf, sizeof(outbuf), &outlen); |
Bert Kenward | a995560 | 2016-08-11 13:01:54 +0100 | [diff] [blame] | 3059 | |
| 3060 | if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN) |
| 3061 | netif_dbg(efx, drv, efx->net_dev, |
| 3062 | "Channel %d using event queue flags %08x\n", |
| 3063 | channel->channel, |
| 3064 | MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS)); |
| 3065 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3066 | /* IRQ return is ignored */ |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 3067 | if (channel->channel || rc) |
| 3068 | return rc; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3069 | |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 3070 | /* Successfully created event queue on channel 0 */ |
| 3071 | rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 3072 | if (rc == -ENOSYS) { |
Bert Kenward | d95e329 | 2016-08-11 13:02:36 +0100 | [diff] [blame] | 3073 | /* GET_WORKAROUNDS was implemented before this workaround, |
| 3074 | * thus it must be unavailable in this firmware. |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 3075 | */ |
| 3076 | nic_data->workaround_26807 = false; |
| 3077 | rc = 0; |
| 3078 | } else if (rc) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3079 | goto fail; |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 3080 | } else { |
| 3081 | nic_data->workaround_26807 = |
| 3082 | !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3083 | |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 3084 | if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && |
| 3085 | !nic_data->workaround_26807) { |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 3086 | unsigned int flags; |
| 3087 | |
Daniel Pieczko | 34ccfe6 | 2015-07-21 15:09:43 +0100 | [diff] [blame] | 3088 | rc = efx_mcdi_set_workaround(efx, |
| 3089 | MC_CMD_WORKAROUND_BUG26807, |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 3090 | true, &flags); |
| 3091 | |
| 3092 | if (!rc) { |
| 3093 | if (flags & |
| 3094 | 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { |
| 3095 | netif_info(efx, drv, efx->net_dev, |
| 3096 | "other functions on NIC have been reset\n"); |
Daniel Pieczko | abd86a5 | 2015-12-04 08:48:39 +0000 | [diff] [blame] | 3097 | |
| 3098 | /* With MCFW v4.6.x and earlier, the |
| 3099 | * boot count will have incremented, |
| 3100 | * so re-read the warm_boot_count |
| 3101 | * value now to ensure this function |
| 3102 | * doesn't think it has changed next |
| 3103 | * time it checks. |
| 3104 | */ |
| 3105 | rc = efx_ef10_get_warm_boot_count(efx); |
| 3106 | if (rc >= 0) { |
| 3107 | nic_data->warm_boot_count = rc; |
| 3108 | rc = 0; |
| 3109 | } |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 3110 | } |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 3111 | nic_data->workaround_26807 = true; |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 3112 | } else if (rc == -EPERM) { |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 3113 | rc = 0; |
Daniel Pieczko | 5a55a72 | 2015-07-21 15:10:02 +0100 | [diff] [blame] | 3114 | } |
Edward Cree | 832dc9e | 2015-07-21 15:09:31 +0100 | [diff] [blame] | 3115 | } |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 3116 | } |
| 3117 | |
| 3118 | if (!rc) |
| 3119 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3120 | |
| 3121 | fail: |
Daniel Pieczko | 46e612b | 2015-07-21 15:09:18 +0100 | [diff] [blame] | 3122 | efx_ef10_ev_fini(channel); |
| 3123 | return rc; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3124 | } |
| 3125 | |
| 3126 | static void efx_ef10_ev_remove(struct efx_channel *channel) |
| 3127 | { |
| 3128 | efx_nic_free_buffer(channel->efx, &channel->eventq.buf); |
| 3129 | } |
| 3130 | |
| 3131 | static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, |
| 3132 | unsigned int rx_queue_label) |
| 3133 | { |
| 3134 | struct efx_nic *efx = rx_queue->efx; |
| 3135 | |
| 3136 | netif_info(efx, hw, efx->net_dev, |
| 3137 | "rx event arrived on queue %d labeled as queue %u\n", |
| 3138 | efx_rx_queue_index(rx_queue), rx_queue_label); |
| 3139 | |
| 3140 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
| 3141 | } |
| 3142 | |
| 3143 | static void |
| 3144 | efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, |
| 3145 | unsigned int actual, unsigned int expected) |
| 3146 | { |
| 3147 | unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; |
| 3148 | struct efx_nic *efx = rx_queue->efx; |
| 3149 | |
| 3150 | netif_info(efx, hw, efx->net_dev, |
| 3151 | "dropped %d events (index=%d expected=%d)\n", |
| 3152 | dropped, actual, expected); |
| 3153 | |
| 3154 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
| 3155 | } |
| 3156 | |
| 3157 | /* partially received RX was aborted. clean up. */ |
| 3158 | static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) |
| 3159 | { |
| 3160 | unsigned int rx_desc_ptr; |
| 3161 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3162 | netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, |
| 3163 | "scattered RX aborted (dropping %u buffers)\n", |
| 3164 | rx_queue->scatter_n); |
| 3165 | |
| 3166 | rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; |
| 3167 | |
| 3168 | efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, |
| 3169 | 0, EFX_RX_PKT_DISCARD); |
| 3170 | |
| 3171 | rx_queue->removed_count += rx_queue->scatter_n; |
| 3172 | rx_queue->scatter_n = 0; |
| 3173 | rx_queue->scatter_len = 0; |
| 3174 | ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; |
| 3175 | } |
| 3176 | |
Jon Cooper | a0ee354 | 2017-02-08 16:50:40 +0000 | [diff] [blame] | 3177 | static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, |
| 3178 | unsigned int n_packets, |
| 3179 | unsigned int rx_encap_hdr, |
| 3180 | unsigned int rx_l3_class, |
| 3181 | unsigned int rx_l4_class, |
| 3182 | const efx_qword_t *event) |
| 3183 | { |
| 3184 | struct efx_nic *efx = channel->efx; |
| 3185 | |
| 3186 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { |
| 3187 | if (!efx->loopback_selftest) |
| 3188 | channel->n_rx_eth_crc_err += n_packets; |
| 3189 | return EFX_RX_PKT_DISCARD; |
| 3190 | } |
| 3191 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { |
| 3192 | if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && |
| 3193 | rx_l3_class != ESE_DZ_L3_CLASS_IP4 && |
| 3194 | rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && |
| 3195 | rx_l3_class != ESE_DZ_L3_CLASS_IP6 && |
| 3196 | rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) |
| 3197 | netdev_WARN(efx->net_dev, |
| 3198 | "invalid class for RX_IPCKSUM_ERR: event=" |
| 3199 | EFX_QWORD_FMT "\n", |
| 3200 | EFX_QWORD_VAL(*event)); |
| 3201 | if (!efx->loopback_selftest) |
| 3202 | *(rx_encap_hdr ? |
| 3203 | &channel->n_rx_outer_ip_hdr_chksum_err : |
| 3204 | &channel->n_rx_ip_hdr_chksum_err) += n_packets; |
| 3205 | return 0; |
| 3206 | } |
| 3207 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { |
| 3208 | if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && |
| 3209 | ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && |
| 3210 | rx_l3_class != ESE_DZ_L3_CLASS_IP6) || |
| 3211 | (rx_l4_class != ESE_DZ_L4_CLASS_TCP && |
| 3212 | rx_l4_class != ESE_DZ_L4_CLASS_UDP)))) |
| 3213 | netdev_WARN(efx->net_dev, |
| 3214 | "invalid class for RX_TCPUDP_CKSUM_ERR: event=" |
| 3215 | EFX_QWORD_FMT "\n", |
| 3216 | EFX_QWORD_VAL(*event)); |
| 3217 | if (!efx->loopback_selftest) |
| 3218 | *(rx_encap_hdr ? |
| 3219 | &channel->n_rx_outer_tcp_udp_chksum_err : |
| 3220 | &channel->n_rx_tcp_udp_chksum_err) += n_packets; |
| 3221 | return 0; |
| 3222 | } |
| 3223 | if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { |
| 3224 | if (unlikely(!rx_encap_hdr)) |
| 3225 | netdev_WARN(efx->net_dev, |
| 3226 | "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" |
| 3227 | EFX_QWORD_FMT "\n", |
| 3228 | EFX_QWORD_VAL(*event)); |
| 3229 | else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && |
| 3230 | rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && |
| 3231 | rx_l3_class != ESE_DZ_L3_CLASS_IP6 && |
| 3232 | rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) |
| 3233 | netdev_WARN(efx->net_dev, |
| 3234 | "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" |
| 3235 | EFX_QWORD_FMT "\n", |
| 3236 | EFX_QWORD_VAL(*event)); |
| 3237 | if (!efx->loopback_selftest) |
| 3238 | channel->n_rx_inner_ip_hdr_chksum_err += n_packets; |
| 3239 | return 0; |
| 3240 | } |
| 3241 | if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { |
| 3242 | if (unlikely(!rx_encap_hdr)) |
| 3243 | netdev_WARN(efx->net_dev, |
| 3244 | "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" |
| 3245 | EFX_QWORD_FMT "\n", |
| 3246 | EFX_QWORD_VAL(*event)); |
| 3247 | else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && |
| 3248 | rx_l3_class != ESE_DZ_L3_CLASS_IP6) || |
| 3249 | (rx_l4_class != ESE_DZ_L4_CLASS_TCP && |
| 3250 | rx_l4_class != ESE_DZ_L4_CLASS_UDP))) |
| 3251 | netdev_WARN(efx->net_dev, |
| 3252 | "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" |
| 3253 | EFX_QWORD_FMT "\n", |
| 3254 | EFX_QWORD_VAL(*event)); |
| 3255 | if (!efx->loopback_selftest) |
| 3256 | channel->n_rx_inner_tcp_udp_chksum_err += n_packets; |
| 3257 | return 0; |
| 3258 | } |
| 3259 | |
| 3260 | WARN_ON(1); /* No error bits were recognised */ |
| 3261 | return 0; |
| 3262 | } |
| 3263 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3264 | static int efx_ef10_handle_rx_event(struct efx_channel *channel, |
| 3265 | const efx_qword_t *event) |
| 3266 | { |
Jon Cooper | a0ee354 | 2017-02-08 16:50:40 +0000 | [diff] [blame] | 3267 | unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; |
| 3268 | unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3269 | unsigned int n_descs, n_packets, i; |
| 3270 | struct efx_nic *efx = channel->efx; |
Jon Cooper | a0ee354 | 2017-02-08 16:50:40 +0000 | [diff] [blame] | 3271 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3272 | struct efx_rx_queue *rx_queue; |
Jon Cooper | a0ee354 | 2017-02-08 16:50:40 +0000 | [diff] [blame] | 3273 | efx_qword_t errors; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3274 | bool rx_cont; |
| 3275 | u16 flags = 0; |
| 3276 | |
| 3277 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) |
| 3278 | return 0; |
| 3279 | |
| 3280 | /* Basic packet information */ |
| 3281 | rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); |
| 3282 | next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); |
| 3283 | rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); |
Jon Cooper | a0ee354 | 2017-02-08 16:50:40 +0000 | [diff] [blame] | 3284 | rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3285 | rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS); |
| 3286 | rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); |
Jon Cooper | a0ee354 | 2017-02-08 16:50:40 +0000 | [diff] [blame] | 3287 | rx_encap_hdr = |
| 3288 | nic_data->datapath_caps & |
| 3289 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? |
| 3290 | EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : |
| 3291 | ESE_EZ_ENCAP_HDR_NONE; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3292 | |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 3293 | if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) |
| 3294 | netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" |
| 3295 | EFX_QWORD_FMT "\n", |
| 3296 | EFX_QWORD_VAL(*event)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3297 | |
| 3298 | rx_queue = efx_channel_get_rx_queue(channel); |
| 3299 | |
| 3300 | if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) |
| 3301 | efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); |
| 3302 | |
| 3303 | n_descs = ((next_ptr_lbits - rx_queue->removed_count) & |
| 3304 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); |
| 3305 | |
| 3306 | if (n_descs != rx_queue->scatter_n + 1) { |
Ben Hutchings | 92a0416 | 2013-09-24 23:21:57 +0100 | [diff] [blame] | 3307 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 3308 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3309 | /* detect rx abort */ |
| 3310 | if (unlikely(n_descs == rx_queue->scatter_n)) { |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 3311 | if (rx_queue->scatter_n == 0 || rx_bytes != 0) |
| 3312 | netdev_WARN(efx->net_dev, |
| 3313 | "invalid RX abort: scatter_n=%u event=" |
| 3314 | EFX_QWORD_FMT "\n", |
| 3315 | rx_queue->scatter_n, |
| 3316 | EFX_QWORD_VAL(*event)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3317 | efx_ef10_handle_rx_abort(rx_queue); |
| 3318 | return 0; |
| 3319 | } |
| 3320 | |
Ben Hutchings | 92a0416 | 2013-09-24 23:21:57 +0100 | [diff] [blame] | 3321 | /* Check that RX completion merging is valid, i.e. |
| 3322 | * the current firmware supports it and this is a |
| 3323 | * non-scattered packet. |
| 3324 | */ |
| 3325 | if (!(nic_data->datapath_caps & |
| 3326 | (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || |
| 3327 | rx_queue->scatter_n != 0 || rx_cont) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3328 | efx_ef10_handle_rx_bad_lbits( |
| 3329 | rx_queue, next_ptr_lbits, |
| 3330 | (rx_queue->removed_count + |
| 3331 | rx_queue->scatter_n + 1) & |
| 3332 | ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); |
| 3333 | return 0; |
| 3334 | } |
| 3335 | |
| 3336 | /* Merged completion for multiple non-scattered packets */ |
| 3337 | rx_queue->scatter_n = 1; |
| 3338 | rx_queue->scatter_len = 0; |
| 3339 | n_packets = n_descs; |
| 3340 | ++channel->n_rx_merge_events; |
| 3341 | channel->n_rx_merge_packets += n_packets; |
| 3342 | flags |= EFX_RX_PKT_PREFIX_LEN; |
| 3343 | } else { |
| 3344 | ++rx_queue->scatter_n; |
| 3345 | rx_queue->scatter_len += rx_bytes; |
| 3346 | if (rx_cont) |
| 3347 | return 0; |
| 3348 | n_packets = 1; |
| 3349 | } |
| 3350 | |
Jon Cooper | a0ee354 | 2017-02-08 16:50:40 +0000 | [diff] [blame] | 3351 | EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, |
| 3352 | ESF_DZ_RX_IPCKSUM_ERR, 1, |
| 3353 | ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, |
| 3354 | ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, |
| 3355 | ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); |
| 3356 | EFX_AND_QWORD(errors, *event, errors); |
| 3357 | if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { |
| 3358 | flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, |
| 3359 | rx_encap_hdr, rx_l3_class, |
| 3360 | rx_l4_class, event); |
| 3361 | } else { |
Jon Cooper | da50ae2 | 2017-02-08 16:51:02 +0000 | [diff] [blame] | 3362 | bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP || |
| 3363 | rx_l4_class == ESE_DZ_L4_CLASS_UDP; |
| 3364 | |
| 3365 | switch (rx_encap_hdr) { |
| 3366 | case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ |
| 3367 | flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ |
| 3368 | if (tcpudp) |
| 3369 | flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ |
| 3370 | break; |
| 3371 | case ESE_EZ_ENCAP_HDR_GRE: |
| 3372 | case ESE_EZ_ENCAP_HDR_NONE: |
| 3373 | if (tcpudp) |
| 3374 | flags |= EFX_RX_PKT_CSUMMED; |
| 3375 | break; |
| 3376 | default: |
| 3377 | netdev_WARN(efx->net_dev, |
| 3378 | "unknown encapsulation type: event=" |
| 3379 | EFX_QWORD_FMT "\n", |
| 3380 | EFX_QWORD_VAL(*event)); |
| 3381 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3382 | } |
| 3383 | |
| 3384 | if (rx_l4_class == ESE_DZ_L4_CLASS_TCP) |
| 3385 | flags |= EFX_RX_PKT_TCP; |
| 3386 | |
| 3387 | channel->irq_mod_score += 2 * n_packets; |
| 3388 | |
| 3389 | /* Handle received packet(s) */ |
| 3390 | for (i = 0; i < n_packets; i++) { |
| 3391 | efx_rx_packet(rx_queue, |
| 3392 | rx_queue->removed_count & rx_queue->ptr_mask, |
| 3393 | rx_queue->scatter_n, rx_queue->scatter_len, |
| 3394 | flags); |
| 3395 | rx_queue->removed_count += rx_queue->scatter_n; |
| 3396 | } |
| 3397 | |
| 3398 | rx_queue->scatter_n = 0; |
| 3399 | rx_queue->scatter_len = 0; |
| 3400 | |
| 3401 | return n_packets; |
| 3402 | } |
| 3403 | |
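/* Editor's note: a minimal standalone sketch (userspace C, not part of this
 * driver) of the descriptor-count arithmetic in efx_ef10_handle_rx_event()
 * above: the event carries only the low bits of the hardware descriptor
 * pointer, so the number of descriptors consumed is computed modulo 2^WIDTH
 * against the driver's own removed_count.  The 8-bit width and sketch_* names
 * are illustrative assumptions.
 */
#include <stdint.h>

#define SKETCH_DSC_PTR_LBITS_WIDTH 8

static unsigned int sketch_rx_descs_consumed(unsigned int next_ptr_lbits,
					     uint32_t removed_count)
{
	return (next_ptr_lbits - removed_count) &
	       ((1u << SKETCH_DSC_PTR_LBITS_WIDTH) - 1);
}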
| 3404 | static int |
| 3405 | efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) |
| 3406 | { |
| 3407 | struct efx_nic *efx = channel->efx; |
| 3408 | struct efx_tx_queue *tx_queue; |
| 3409 | unsigned int tx_ev_desc_ptr; |
| 3410 | unsigned int tx_ev_q_label; |
| 3411 | int tx_descs = 0; |
| 3412 | |
| 3413 | if (unlikely(ACCESS_ONCE(efx->reset_pending))) |
| 3414 | return 0; |
| 3415 | |
| 3416 | if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) |
| 3417 | return 0; |
| 3418 | |
| 3419 | /* Transmit completion */ |
| 3420 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); |
| 3421 | tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); |
| 3422 | tx_queue = efx_channel_get_tx_queue(channel, |
| 3423 | tx_ev_q_label % EFX_TXQ_TYPES); |
| 3424 | tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & |
| 3425 | tx_queue->ptr_mask); |
| 3426 | efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); |
| 3427 | |
| 3428 | return tx_descs; |
| 3429 | } |
| 3430 | |
| 3431 | static void |
| 3432 | efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) |
| 3433 | { |
| 3434 | struct efx_nic *efx = channel->efx; |
| 3435 | int subcode; |
| 3436 | |
| 3437 | subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); |
| 3438 | |
| 3439 | switch (subcode) { |
| 3440 | case ESE_DZ_DRV_TIMER_EV: |
| 3441 | case ESE_DZ_DRV_WAKE_UP_EV: |
| 3442 | break; |
| 3443 | case ESE_DZ_DRV_START_UP_EV: |
| 3444 | /* event queue init complete. ok. */ |
| 3445 | break; |
| 3446 | default: |
| 3447 | netif_err(efx, hw, efx->net_dev, |
| 3448 | "channel %d unknown driver event type %d" |
| 3449 | " (data " EFX_QWORD_FMT ")\n", |
| 3450 | channel->channel, subcode, |
| 3451 | EFX_QWORD_VAL(*event)); |
| 3452 | |
| 3453 | } |
| 3454 | } |
| 3455 | |
| 3456 | static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, |
| 3457 | efx_qword_t *event) |
| 3458 | { |
| 3459 | struct efx_nic *efx = channel->efx; |
| 3460 | u32 subcode; |
| 3461 | |
| 3462 | subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); |
| 3463 | |
| 3464 | switch (subcode) { |
| 3465 | case EFX_EF10_TEST: |
| 3466 | channel->event_test_cpu = raw_smp_processor_id(); |
| 3467 | break; |
| 3468 | case EFX_EF10_REFILL: |
| 3469 | /* The queue must be empty, so we won't receive any rx |
| 3470 | * events, so efx_process_channel() won't refill the |
| 3471 | * queue. Refill it here |
| 3472 | */ |
Jon Cooper | cce2879 | 2013-10-02 11:04:14 +0100 | [diff] [blame] | 3473 | efx_fast_push_rx_descriptors(&channel->rx_queue, true); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3474 | break; |
| 3475 | default: |
| 3476 | netif_err(efx, hw, efx->net_dev, |
| 3477 | "channel %d unknown driver event type %u" |
| 3478 | " (data " EFX_QWORD_FMT ")\n", |
| 3479 | channel->channel, (unsigned) subcode, |
| 3480 | EFX_QWORD_VAL(*event)); |
| 3481 | } |
| 3482 | } |
| 3483 | |
| 3484 | static int efx_ef10_ev_process(struct efx_channel *channel, int quota) |
| 3485 | { |
| 3486 | struct efx_nic *efx = channel->efx; |
| 3487 | efx_qword_t event, *p_event; |
| 3488 | unsigned int read_ptr; |
| 3489 | int ev_code; |
| 3490 | int tx_descs = 0; |
| 3491 | int spent = 0; |
| 3492 | |
Eric W. Biederman | 75363a4 | 2014-03-14 18:11:22 -0700 | [diff] [blame] | 3493 | if (quota <= 0) |
| 3494 | return spent; |
| 3495 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3496 | read_ptr = channel->eventq_read_ptr; |
| 3497 | |
| 3498 | for (;;) { |
| 3499 | p_event = efx_event(channel, read_ptr); |
| 3500 | event = *p_event; |
| 3501 | |
| 3502 | if (!efx_event_present(&event)) |
| 3503 | break; |
| 3504 | |
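| | /* Clear the event (all-ones marks the entry as empty) so it is
| | * not processed again.
| | */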
| 3505 | EFX_SET_QWORD(*p_event); |
| 3506 | |
| 3507 | ++read_ptr; |
| 3508 | |
| 3509 | ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); |
| 3510 | |
| 3511 | netif_vdbg(efx, drv, efx->net_dev, |
| 3512 | "processing event on %d " EFX_QWORD_FMT "\n", |
| 3513 | channel->channel, EFX_QWORD_VAL(event)); |
| 3514 | |
| 3515 | switch (ev_code) { |
| 3516 | case ESE_DZ_EV_CODE_MCDI_EV: |
| 3517 | efx_mcdi_process_event(channel, &event); |
| 3518 | break; |
| 3519 | case ESE_DZ_EV_CODE_RX_EV: |
| 3520 | spent += efx_ef10_handle_rx_event(channel, &event); |
| 3521 | if (spent >= quota) { |
| 3522 | /* XXX can we split a merged event to |
| 3523 | * avoid going over-quota? |
| 3524 | */ |
| 3525 | spent = quota; |
| 3526 | goto out; |
| 3527 | } |
| 3528 | break; |
| 3529 | case ESE_DZ_EV_CODE_TX_EV: |
| 3530 | tx_descs += efx_ef10_handle_tx_event(channel, &event); |
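| | /* Stop if we have already completed more than a full TX queue's
| | * worth of descriptors, even though the event quota is not yet
| | * exhausted.
| | */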
| 3531 | if (tx_descs > efx->txq_entries) { |
| 3532 | spent = quota; |
| 3533 | goto out; |
| 3534 | } else if (++spent == quota) { |
| 3535 | goto out; |
| 3536 | } |
| 3537 | break; |
| 3538 | case ESE_DZ_EV_CODE_DRIVER_EV: |
| 3539 | efx_ef10_handle_driver_event(channel, &event); |
| 3540 | if (++spent == quota) |
| 3541 | goto out; |
| 3542 | break; |
| 3543 | case EFX_EF10_DRVGEN_EV: |
| 3544 | efx_ef10_handle_driver_generated_event(channel, &event); |
| 3545 | break; |
| 3546 | default: |
| 3547 | netif_err(efx, hw, efx->net_dev, |
| 3548 | "channel %d unknown event type %d" |
| 3549 | " (data " EFX_QWORD_FMT ")\n", |
| 3550 | channel->channel, ev_code, |
| 3551 | EFX_QWORD_VAL(event)); |
| 3552 | } |
| 3553 | } |
| 3554 | |
| 3555 | out: |
| 3556 | channel->eventq_read_ptr = read_ptr; |
| 3557 | return spent; |
| 3558 | } |
| 3559 | |
| 3560 | static void efx_ef10_ev_read_ack(struct efx_channel *channel) |
| 3561 | { |
| 3562 | struct efx_nic *efx = channel->efx; |
| 3563 | efx_dword_t rptr; |
| 3564 | |
| 3565 | if (EFX_EF10_WORKAROUND_35388(efx)) { |
| 3566 | BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < |
| 3567 | (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); |
| 3568 | BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > |
| 3569 | (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); |
| 3570 | |
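| | /* With workaround 35388 the read pointer is written in two
| | * halves, high bits first, through the indirect event queue
| | * register.
| | */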
| 3571 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, |
| 3572 | EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, |
| 3573 | ERF_DD_EVQ_IND_RPTR, |
| 3574 | (channel->eventq_read_ptr & |
| 3575 | channel->eventq_mask) >> |
| 3576 | ERF_DD_EVQ_IND_RPTR_WIDTH); |
| 3577 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, |
| 3578 | channel->channel); |
| 3579 | EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, |
| 3580 | EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, |
| 3581 | ERF_DD_EVQ_IND_RPTR, |
| 3582 | channel->eventq_read_ptr & |
| 3583 | ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); |
| 3584 | efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, |
| 3585 | channel->channel); |
| 3586 | } else { |
| 3587 | EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, |
| 3588 | channel->eventq_read_ptr & |
| 3589 | channel->eventq_mask); |
| 3590 | efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); |
| 3591 | } |
| 3592 | } |
| 3593 | |
| 3594 | static void efx_ef10_ev_test_generate(struct efx_channel *channel) |
| 3595 | { |
| 3596 | MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); |
| 3597 | struct efx_nic *efx = channel->efx; |
| 3598 | efx_qword_t event; |
| 3599 | int rc; |
| 3600 | |
| 3601 | EFX_POPULATE_QWORD_2(event, |
| 3602 | ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, |
| 3603 | ESF_DZ_EV_DATA, EFX_EF10_TEST); |
| 3604 | |
| 3605 | MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); |
| 3606 | |
| 3607 | /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has |
| 3608 | * already swapped the data to little-endian order. |
| 3609 | */ |
| 3610 | memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], |
| 3611 | sizeof(efx_qword_t)); |
| 3612 | |
| 3613 | rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), |
| 3614 | NULL, 0, NULL); |
| 3615 | if (rc != 0) |
| 3616 | goto fail; |
| 3617 | |
| 3618 | return; |
| 3619 | |
| 3620 | fail: |
| 3621 | WARN_ON(true); |
| 3622 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
| 3623 | } |
| 3624 | |
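| | /* Decrement the active queue count for a drained queue and wake
| | * the flush waiter (see efx_ef10_fini_dmaq()) once it reaches zero.
| | */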
| 3625 | void efx_ef10_handle_drain_event(struct efx_nic *efx) |
| 3626 | { |
| 3627 | if (atomic_dec_and_test(&efx->active_queues)) |
| 3628 | wake_up(&efx->flush_wq); |
| 3629 | |
| 3630 | WARN_ON(atomic_read(&efx->active_queues) < 0); |
| 3631 | } |
| 3632 | |
| 3633 | static int efx_ef10_fini_dmaq(struct efx_nic *efx) |
| 3634 | { |
| 3635 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 3636 | struct efx_channel *channel; |
| 3637 | struct efx_tx_queue *tx_queue; |
| 3638 | struct efx_rx_queue *rx_queue; |
| 3639 | int pending; |
| 3640 | |
| 3641 | /* If the MC has just rebooted, the TX/RX queues will have already been |
| 3642 | * torn down, but efx->active_queues needs to be set to zero. |
| 3643 | */ |
| 3644 | if (nic_data->must_realloc_vis) { |
| 3645 | atomic_set(&efx->active_queues, 0); |
| 3646 | return 0; |
| 3647 | } |
| 3648 | |
| 3649 | /* Do not attempt to write to the NIC during EEH recovery */ |
| 3650 | if (efx->state != STATE_RECOVERY) { |
| 3651 | efx_for_each_channel(channel, efx) { |
| 3652 | efx_for_each_channel_rx_queue(rx_queue, channel) |
| 3653 | efx_ef10_rx_fini(rx_queue); |
| 3654 | efx_for_each_channel_tx_queue(tx_queue, channel) |
| 3655 | efx_ef10_tx_fini(tx_queue); |
| 3656 | } |
| 3657 | |
| 3658 | wait_event_timeout(efx->flush_wq, |
| 3659 | atomic_read(&efx->active_queues) == 0, |
| 3660 | msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); |
| 3661 | pending = atomic_read(&efx->active_queues); |
| 3662 | if (pending) { |
| 3663 | netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", |
| 3664 | pending); |
| 3665 | return -ETIMEDOUT; |
| 3666 | } |
| 3667 | } |
| 3668 | |
| 3669 | return 0; |
| 3670 | } |
| 3671 | |
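| | /* An FLR will tear down all queues, so the active queue count is
| | * reset in advance.
| | */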
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 3672 | static void efx_ef10_prepare_flr(struct efx_nic *efx) |
| 3673 | { |
| 3674 | atomic_set(&efx->active_queues, 0); |
| 3675 | } |
| 3676 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3677 | static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, |
| 3678 | const struct efx_filter_spec *right) |
| 3679 | { |
| 3680 | if ((left->match_flags ^ right->match_flags) | |
| 3681 | ((left->flags ^ right->flags) & |
| 3682 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) |
| 3683 | return false; |
| 3684 | |
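| | /* Everything from outer_vid to the end of the spec is part of the
| | * match tuple; the fields before it are deliberately not compared.
| | */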
| 3685 | return memcmp(&left->outer_vid, &right->outer_vid, |
| 3686 | sizeof(struct efx_filter_spec) - |
| 3687 | offsetof(struct efx_filter_spec, outer_vid)) == 0; |
| 3688 | } |
| 3689 | |
| 3690 | static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) |
| 3691 | { |
| 3692 | BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); |
| 3693 | return jhash2((const u32 *)&spec->outer_vid, |
| 3694 | (sizeof(struct efx_filter_spec) - |
| 3695 | offsetof(struct efx_filter_spec, outer_vid)) / 4, |
| 3696 | 0); |
| 3697 | /* XXX should we randomise the initval? */ |
| 3698 | } |
| 3699 | |
| 3700 | /* Decide whether a filter should be exclusive or else should allow |
| 3701 | * delivery to additional recipients. Currently we decide that |
| 3702 | * filters for specific local unicast MAC and IP addresses are |
| 3703 | * exclusive. |
| 3704 | */ |
| 3705 | static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) |
| 3706 | { |
| 3707 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && |
| 3708 | !is_multicast_ether_addr(spec->loc_mac)) |
| 3709 | return true; |
| 3710 | |
| 3711 | if ((spec->match_flags & |
| 3712 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == |
| 3713 | (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { |
| 3714 | if (spec->ether_type == htons(ETH_P_IP) && |
| 3715 | !ipv4_is_multicast(spec->loc_host[0])) |
| 3716 | return true; |
| 3717 | if (spec->ether_type == htons(ETH_P_IPV6) && |
| 3718 | ((const u8 *)spec->loc_host)[0] != 0xff) |
| 3719 | return true; |
| 3720 | } |
| 3721 | |
| 3722 | return false; |
| 3723 | } |
| 3724 | |
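| | /* Each software table entry stores the spec pointer with the
| | * EFX_EF10_FILTER_FLAG_* bits packed into its low bits (see
| | * efx_ef10_filter_set_entry()).
| | */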
| 3725 | static struct efx_filter_spec * |
| 3726 | efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, |
| 3727 | unsigned int filter_idx) |
| 3728 | { |
| 3729 | return (struct efx_filter_spec *)(table->entry[filter_idx].spec & |
| 3730 | ~EFX_EF10_FILTER_FLAGS); |
| 3731 | } |
| 3732 | |
| 3733 | static unsigned int |
| 3734 | efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, |
| 3735 | unsigned int filter_idx) |
| 3736 | { |
| 3737 | return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; |
| 3738 | } |
| 3739 | |
| 3740 | static void |
| 3741 | efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, |
| 3742 | unsigned int filter_idx, |
| 3743 | const struct efx_filter_spec *spec, |
| 3744 | unsigned int flags) |
| 3745 | { |
| 3746 | table->entry[filter_idx].spec = (unsigned long)spec | flags; |
| 3747 | } |
| 3748 | |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3749 | static void |
| 3750 | efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, |
| 3751 | const struct efx_filter_spec *spec, |
| 3752 | efx_dword_t *inbuf) |
| 3753 | { |
| 3754 | enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); |
| 3755 | u32 match_fields = 0, uc_match, mc_match; |
| 3756 | |
| 3757 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 3758 | efx_ef10_filter_is_exclusive(spec) ? |
| 3759 | MC_CMD_FILTER_OP_IN_OP_INSERT : |
| 3760 | MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); |
| 3761 | |
| 3762 | /* Convert match flags and values. Unlike almost |
| 3763 | * everything else in MCDI, these fields are in |
| 3764 | * network byte order. |
| 3765 | */ |
| 3766 | #define COPY_VALUE(value, mcdi_field) \ |
| 3767 | do { \ |
| 3768 | match_fields |= \ |
| 3769 | 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ |
| 3770 | mcdi_field ## _LBN; \ |
| 3771 | BUILD_BUG_ON( \ |
| 3772 | MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ |
| 3773 | sizeof(value)); \ |
| 3774 | memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ |
| 3775 | &value, sizeof(value)); \ |
| 3776 | } while (0) |
| 3777 | #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ |
| 3778 | if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ |
| 3779 | COPY_VALUE(spec->gen_field, mcdi_field); \ |
| 3780 | } |
| 3781 | /* Handle encap filters first. These are always mismatch
| 3782 | * (unknown UC or MC) filters.
| 3783 | */
| 3784 | if (encap_type) { |
| 3785 | /* ether_type and outer_ip_proto need to be variables |
| 3786 | * because COPY_VALUE wants to memcpy them |
| 3787 | */ |
| 3788 | __be16 ether_type = |
| 3789 | htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? |
| 3790 | ETH_P_IPV6 : ETH_P_IP); |
| 3791 | u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; |
| 3792 | u8 outer_ip_proto; |
| 3793 | |
| 3794 | switch (encap_type & EFX_ENCAP_TYPES_MASK) { |
| 3795 | case EFX_ENCAP_TYPE_VXLAN: |
| 3796 | vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; |
| 3797 | /* fallthrough */ |
| 3798 | case EFX_ENCAP_TYPE_GENEVE: |
| 3799 | COPY_VALUE(ether_type, ETHER_TYPE); |
| 3800 | outer_ip_proto = IPPROTO_UDP; |
| 3801 | COPY_VALUE(outer_ip_proto, IP_PROTO); |
| 3802 | /* We always need to set the type field, even |
| 3803 | * though we're not matching on the TNI. |
| 3804 | */ |
| 3805 | MCDI_POPULATE_DWORD_1(inbuf, |
| 3806 | FILTER_OP_EXT_IN_VNI_OR_VSID, |
| 3807 | FILTER_OP_EXT_IN_VNI_TYPE, |
| 3808 | vni_type); |
| 3809 | break; |
| 3810 | case EFX_ENCAP_TYPE_NVGRE: |
| 3811 | COPY_VALUE(ether_type, ETHER_TYPE); |
| 3812 | outer_ip_proto = IPPROTO_GRE; |
| 3813 | COPY_VALUE(outer_ip_proto, IP_PROTO); |
| 3814 | break; |
| 3815 | default: |
| 3816 | WARN_ON(1); |
| 3817 | } |
| 3818 | |
| 3819 | uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; |
| 3820 | mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; |
| 3821 | } else { |
| 3822 | uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; |
| 3823 | mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; |
| 3824 | } |
| 3825 | |
| 3826 | if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) |
| 3827 | match_fields |= |
| 3828 | is_multicast_ether_addr(spec->loc_mac) ? |
| 3829 | 1 << mc_match : |
| 3830 | 1 << uc_match; |
| 3831 | COPY_FIELD(REM_HOST, rem_host, SRC_IP); |
| 3832 | COPY_FIELD(LOC_HOST, loc_host, DST_IP); |
| 3833 | COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); |
| 3834 | COPY_FIELD(REM_PORT, rem_port, SRC_PORT); |
| 3835 | COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); |
| 3836 | COPY_FIELD(LOC_PORT, loc_port, DST_PORT); |
| 3837 | COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); |
| 3838 | COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); |
| 3839 | COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); |
| 3840 | COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); |
| 3841 | #undef COPY_FIELD |
| 3842 | #undef COPY_VALUE |
| 3843 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, |
| 3844 | match_fields); |
| 3845 | } |
| 3846 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3847 | static void efx_ef10_filter_push_prep(struct efx_nic *efx, |
| 3848 | const struct efx_filter_spec *spec, |
| 3849 | efx_dword_t *inbuf, u64 handle, |
| 3850 | bool replacing) |
| 3851 | { |
| 3852 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 3853 | u32 flags = spec->flags; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3854 | |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3855 | memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3856 | |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 3857 | /* Remove RSS flag if we don't have an RSS context. */ |
| 3858 | if (flags & EFX_FILTER_FLAG_RX_RSS && |
| 3859 | spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT && |
| 3860 | nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) |
| 3861 | flags &= ~EFX_FILTER_FLAG_RX_RSS; |
| 3862 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3863 | if (replacing) { |
| 3864 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 3865 | MC_CMD_FILTER_OP_IN_OP_REPLACE); |
| 3866 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); |
| 3867 | } else { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3868 | efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3869 | } |
| 3870 | |
Daniel Pieczko | 45b2449 | 2015-05-06 00:57:14 +0100 | [diff] [blame] | 3871 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3872 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, |
| 3873 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? |
| 3874 | MC_CMD_FILTER_OP_IN_RX_DEST_DROP : |
| 3875 | MC_CMD_FILTER_OP_IN_RX_DEST_HOST); |
Shradha Shah | e3d3629 | 2015-05-06 00:56:24 +0100 | [diff] [blame] | 3876 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3877 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, |
| 3878 | MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); |
Ben Hutchings | a0bc348 | 2013-12-16 18:56:24 +0000 | [diff] [blame] | 3879 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, |
| 3880 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? |
| 3881 | 0 : spec->dmaq_id); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3882 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 3883 | (flags & EFX_FILTER_FLAG_RX_RSS) ? |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3884 | MC_CMD_FILTER_OP_IN_RX_MODE_RSS : |
| 3885 | MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); |
Jon Cooper | dcb4123 | 2016-04-25 16:51:00 +0100 | [diff] [blame] | 3886 | if (flags & EFX_FILTER_FLAG_RX_RSS) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3887 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, |
| 3888 | spec->rss_context != |
| 3889 | EFX_FILTER_RSS_CONTEXT_DEFAULT ? |
| 3890 | spec->rss_context : nic_data->rx_rss_context); |
| 3891 | } |
| 3892 | |
| 3893 | static int efx_ef10_filter_push(struct efx_nic *efx, |
| 3894 | const struct efx_filter_spec *spec, |
| 3895 | u64 *handle, bool replacing) |
| 3896 | { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3897 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); |
| 3898 | MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3899 | int rc; |
| 3900 | |
| 3901 | efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); |
| 3902 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), |
| 3903 | outbuf, sizeof(outbuf), NULL); |
| 3904 | if (rc == 0) |
| 3905 | *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); |
Ben Hutchings | 065e64c | 2013-10-09 14:17:27 +0100 | [diff] [blame] | 3906 | if (rc == -ENOSPC) |
| 3907 | rc = -EBUSY; /* to match efx_farch_filter_insert() */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3908 | return rc; |
| 3909 | } |
| 3910 | |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 3911 | static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3912 | { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3913 | enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 3914 | unsigned int match_flags = spec->match_flags; |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3915 | unsigned int uc_match, mc_match; |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 3916 | u32 mcdi_flags = 0; |
| 3917 | |
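| | /* Map each generic match flag present in the spec to the
| | * corresponding MCDI match-field bit, using the inner-frame (IFRM)
| | * field when the filter is an encapsulated one.
| | */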
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3918 | #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ |
| 3919 | unsigned int old_match_flags = match_flags; \ |
| 3920 | match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ |
| 3921 | if (match_flags != old_match_flags) \ |
| 3922 | mcdi_flags |= \ |
| 3923 | (1 << ((encap) ? \ |
| 3924 | MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ |
| 3925 | mcdi_field ## _LBN : \ |
| 3926 | MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ |
| 3927 | mcdi_field ## _LBN)); \ |
| 3928 | } |
| 3929 | /* inner or outer based on encap type */ |
| 3930 | MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); |
| 3931 | MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); |
| 3932 | MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); |
| 3933 | MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); |
| 3934 | MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); |
| 3935 | MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); |
| 3936 | MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); |
| 3937 | MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); |
| 3938 | /* always outer */ |
| 3939 | MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); |
| 3940 | MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); |
| 3941 | #undef MAP_FILTER_TO_MCDI_FLAG |
| 3942 | |
| 3943 | /* special handling for encap type, and mismatch */ |
| 3944 | if (encap_type) { |
| 3945 | match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; |
| 3946 | mcdi_flags |= |
| 3947 | (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); |
| 3948 | mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); |
| 3949 | |
| 3950 | uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; |
| 3951 | mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; |
| 3952 | } else { |
| 3953 | uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; |
| 3954 | mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; |
| 3955 | } |
| 3956 | |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 3957 | if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { |
| 3958 | match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; |
| 3959 | mcdi_flags |= |
| 3960 | is_multicast_ether_addr(spec->loc_mac) ? |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 3961 | 1 << mc_match : |
| 3962 | 1 << uc_match; |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 3963 | } |
| 3964 | |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 3965 | /* Did we map them all? */ |
| 3966 | WARN_ON_ONCE(match_flags); |
| 3967 | |
| 3968 | return mcdi_flags; |
| 3969 | } |
| 3970 | |
| 3971 | static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, |
| 3972 | const struct efx_filter_spec *spec) |
| 3973 | { |
| 3974 | u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3975 | unsigned int match_pri; |
| 3976 | |
| 3977 | for (match_pri = 0; |
| 3978 | match_pri < table->rx_match_count; |
| 3979 | match_pri++) |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 3980 | if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 3981 | return match_pri; |
| 3982 | |
| 3983 | return -EPROTONOSUPPORT; |
| 3984 | } |
| 3985 | |
| 3986 | static s32 efx_ef10_filter_insert(struct efx_nic *efx, |
| 3987 | struct efx_filter_spec *spec, |
| 3988 | bool replace_equal) |
| 3989 | { |
| 3990 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3991 | DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); |
| 3992 | struct efx_filter_spec *saved_spec; |
| 3993 | unsigned int match_pri, hash; |
| 3994 | unsigned int priv_flags; |
| 3995 | bool replacing = false; |
| 3996 | int ins_index = -1; |
| 3997 | DEFINE_WAIT(wait); |
| 3998 | bool is_mc_recip; |
| 3999 | s32 rc; |
| 4000 | |
| 4001 | /* For now, only support RX filters */ |
| 4002 | if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != |
| 4003 | EFX_FILTER_FLAG_RX) |
| 4004 | return -EINVAL; |
| 4005 | |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4006 | rc = efx_ef10_filter_pri(table, spec); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4007 | if (rc < 0) |
| 4008 | return rc; |
| 4009 | match_pri = rc; |
| 4010 | |
| 4011 | hash = efx_ef10_filter_hash(spec); |
| 4012 | is_mc_recip = efx_filter_is_mc_recipient(spec); |
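| | /* mc_rem_map records, by probe depth, any existing multicast
| | * recipients that this insertion should supersede and remove.
| | */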
| 4013 | if (is_mc_recip) |
| 4014 | bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); |
| 4015 | |
| 4016 | /* Find any existing filters with the same match tuple or |
| 4017 | * else a free slot to insert at. If any of them are busy, |
| 4018 | * we have to wait and retry. |
| 4019 | */ |
| 4020 | for (;;) { |
| 4021 | unsigned int depth = 1; |
| 4022 | unsigned int i; |
| 4023 | |
| 4024 | spin_lock_bh(&efx->filter_lock); |
| 4025 | |
| 4026 | for (;;) { |
| 4027 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 4028 | saved_spec = efx_ef10_filter_entry_spec(table, i); |
| 4029 | |
| 4030 | if (!saved_spec) { |
| 4031 | if (ins_index < 0) |
| 4032 | ins_index = i; |
| 4033 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { |
| 4034 | if (table->entry[i].spec & |
| 4035 | EFX_EF10_FILTER_FLAG_BUSY) |
| 4036 | break; |
| 4037 | if (spec->priority < saved_spec->priority && |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4038 | spec->priority != EFX_FILTER_PRI_AUTO) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4039 | rc = -EPERM; |
| 4040 | goto out_unlock; |
| 4041 | } |
| 4042 | if (!is_mc_recip) { |
| 4043 | /* This is the only one */ |
| 4044 | if (spec->priority == |
| 4045 | saved_spec->priority && |
| 4046 | !replace_equal) { |
| 4047 | rc = -EEXIST; |
| 4048 | goto out_unlock; |
| 4049 | } |
| 4050 | ins_index = i; |
| 4051 | goto found; |
| 4052 | } else if (spec->priority > |
| 4053 | saved_spec->priority || |
| 4054 | (spec->priority == |
| 4055 | saved_spec->priority && |
| 4056 | replace_equal)) { |
| 4057 | if (ins_index < 0) |
| 4058 | ins_index = i; |
| 4059 | else |
| 4060 | __set_bit(depth, mc_rem_map); |
| 4061 | } |
| 4062 | } |
| 4063 | |
| 4064 | /* Once we reach the maximum search depth, use |
| 4065 | * the first suitable slot or return -EBUSY if |
| 4066 | * there was none |
| 4067 | */ |
| 4068 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { |
| 4069 | if (ins_index < 0) { |
| 4070 | rc = -EBUSY; |
| 4071 | goto out_unlock; |
| 4072 | } |
| 4073 | goto found; |
| 4074 | } |
| 4075 | |
| 4076 | ++depth; |
| 4077 | } |
| 4078 | |
| 4079 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); |
| 4080 | spin_unlock_bh(&efx->filter_lock); |
| 4081 | schedule(); |
| 4082 | } |
| 4083 | |
| 4084 | found: |
| 4085 | /* Create a software table entry if necessary, and mark it |
| 4086 | * busy. We might yet fail to insert, but any attempt to |
| 4087 | * insert a conflicting filter while we're waiting for the |
| 4088 | * firmware must find the busy entry. |
| 4089 | */ |
| 4090 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); |
| 4091 | if (saved_spec) { |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4092 | if (spec->priority == EFX_FILTER_PRI_AUTO && |
| 4093 | saved_spec->priority >= EFX_FILTER_PRI_AUTO) { |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4094 | /* Just make sure it won't be removed */ |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4095 | if (saved_spec->priority > EFX_FILTER_PRI_AUTO) |
| 4096 | saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4097 | table->entry[ins_index].spec &= |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 4098 | ~EFX_EF10_FILTER_FLAG_AUTO_OLD; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4099 | rc = ins_index; |
| 4100 | goto out_unlock; |
| 4101 | } |
| 4102 | replacing = true; |
| 4103 | priv_flags = efx_ef10_filter_entry_flags(table, ins_index); |
| 4104 | } else { |
| 4105 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); |
| 4106 | if (!saved_spec) { |
| 4107 | rc = -ENOMEM; |
| 4108 | goto out_unlock; |
| 4109 | } |
| 4110 | *saved_spec = *spec; |
| 4111 | priv_flags = 0; |
| 4112 | } |
| 4113 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, |
| 4114 | priv_flags | EFX_EF10_FILTER_FLAG_BUSY); |
| 4115 | |
| 4116 | /* Mark lower-priority multicast recipients busy prior to removal */ |
| 4117 | if (is_mc_recip) { |
| 4118 | unsigned int depth, i; |
| 4119 | |
| 4120 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { |
| 4121 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 4122 | if (test_bit(depth, mc_rem_map)) |
| 4123 | table->entry[i].spec |= |
| 4124 | EFX_EF10_FILTER_FLAG_BUSY; |
| 4125 | } |
| 4126 | } |
| 4127 | |
| 4128 | spin_unlock_bh(&efx->filter_lock); |
| 4129 | |
| 4130 | rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, |
| 4131 | replacing); |
| 4132 | |
| 4133 | /* Finalise the software table entry */ |
| 4134 | spin_lock_bh(&efx->filter_lock); |
| 4135 | if (rc == 0) { |
| 4136 | if (replacing) { |
| 4137 | /* Update the fields that may differ */ |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4138 | if (saved_spec->priority == EFX_FILTER_PRI_AUTO) |
| 4139 | saved_spec->flags |= |
| 4140 | EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4141 | saved_spec->priority = spec->priority; |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4142 | saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4143 | saved_spec->flags |= spec->flags; |
| 4144 | saved_spec->rss_context = spec->rss_context; |
| 4145 | saved_spec->dmaq_id = spec->dmaq_id; |
| 4146 | } |
| 4147 | } else if (!replacing) { |
| 4148 | kfree(saved_spec); |
| 4149 | saved_spec = NULL; |
| 4150 | } |
| 4151 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); |
| 4152 | |
| 4153 | /* Remove and finalise entries for lower-priority multicast |
| 4154 | * recipients |
| 4155 | */ |
| 4156 | if (is_mc_recip) { |
| 4157 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); |
| 4158 | unsigned int depth, i; |
| 4159 | |
| 4160 | memset(inbuf, 0, sizeof(inbuf)); |
| 4161 | |
| 4162 | for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { |
| 4163 | if (!test_bit(depth, mc_rem_map)) |
| 4164 | continue; |
| 4165 | |
| 4166 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 4167 | saved_spec = efx_ef10_filter_entry_spec(table, i); |
| 4168 | priv_flags = efx_ef10_filter_entry_flags(table, i); |
| 4169 | |
| 4170 | if (rc == 0) { |
| 4171 | spin_unlock_bh(&efx->filter_lock); |
| 4172 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 4173 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); |
| 4174 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 4175 | table->entry[i].handle); |
| 4176 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, |
| 4177 | inbuf, sizeof(inbuf), |
| 4178 | NULL, 0, NULL); |
| 4179 | spin_lock_bh(&efx->filter_lock); |
| 4180 | } |
| 4181 | |
| 4182 | if (rc == 0) { |
| 4183 | kfree(saved_spec); |
| 4184 | saved_spec = NULL; |
| 4185 | priv_flags = 0; |
| 4186 | } else { |
| 4187 | priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; |
| 4188 | } |
| 4189 | efx_ef10_filter_set_entry(table, i, saved_spec, |
| 4190 | priv_flags); |
| 4191 | } |
| 4192 | } |
| 4193 | |
| 4194 | /* If successful, return the inserted filter ID */ |
| 4195 | if (rc == 0) |
| 4196 | rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index; |
| 4197 | |
| 4198 | wake_up_all(&table->waitq); |
| 4199 | out_unlock: |
| 4200 | spin_unlock_bh(&efx->filter_lock); |
| 4201 | finish_wait(&table->waitq, &wait); |
| 4202 | return rc; |
| 4203 | } |
| 4204 | |
Fengguang Wu | 9fd8095d | 2013-08-31 06:54:05 +0800 | [diff] [blame] | 4205 | static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4206 | { |
| 4207 | /* no need to do anything here on EF10 */ |
| 4208 | } |
| 4209 | |
| 4210 | /* Remove a filter. |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 4211 | * If !by_index, remove by ID |
| 4212 | * If by_index, remove by index |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4213 | * Filter ID may come from userland and must be range-checked. |
| 4214 | */ |
| 4215 | static int efx_ef10_filter_remove_internal(struct efx_nic *efx, |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 4216 | unsigned int priority_mask, |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 4217 | u32 filter_id, bool by_index) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4218 | { |
| 4219 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; |
| 4220 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4221 | MCDI_DECLARE_BUF(inbuf, |
| 4222 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + |
| 4223 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); |
| 4224 | struct efx_filter_spec *spec; |
| 4225 | DEFINE_WAIT(wait); |
| 4226 | int rc; |
| 4227 | |
| 4228 | /* Find the software table entry and mark it busy. Don't |
| 4229 | * remove it yet; any attempt to update while we're waiting |
| 4230 | * for the firmware must find the busy entry. |
| 4231 | */ |
| 4232 | for (;;) { |
| 4233 | spin_lock_bh(&efx->filter_lock); |
| 4234 | if (!(table->entry[filter_idx].spec & |
| 4235 | EFX_EF10_FILTER_FLAG_BUSY)) |
| 4236 | break; |
| 4237 | prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); |
| 4238 | spin_unlock_bh(&efx->filter_lock); |
| 4239 | schedule(); |
| 4240 | } |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4241 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4242 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4243 | if (!spec || |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 4244 | (!by_index && |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4245 | efx_ef10_filter_pri(table, spec) != |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4246 | filter_id / HUNT_FILTER_TBL_ROWS)) { |
| 4247 | rc = -ENOENT; |
| 4248 | goto out_unlock; |
| 4249 | } |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4250 | |
| 4251 | if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 4252 | priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4253 | /* Just remove flags */ |
| 4254 | spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 4255 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4256 | rc = 0; |
| 4257 | goto out_unlock; |
| 4258 | } |
| 4259 | |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 4260 | if (!(priority_mask & (1U << spec->priority))) { |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4261 | rc = -ENOENT; |
| 4262 | goto out_unlock; |
| 4263 | } |
| 4264 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4265 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; |
| 4266 | spin_unlock_bh(&efx->filter_lock); |
| 4267 | |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4268 | if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 4269 | /* Reset to an automatic filter */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4270 | |
| 4271 | struct efx_filter_spec new_spec = *spec; |
| 4272 | |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4273 | new_spec.priority = EFX_FILTER_PRI_AUTO; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4274 | new_spec.flags = (EFX_FILTER_FLAG_RX | |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 4275 | (efx_rss_enabled(efx) ? |
| 4276 | EFX_FILTER_FLAG_RX_RSS : 0)); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4277 | new_spec.dmaq_id = 0; |
| 4278 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; |
| 4279 | rc = efx_ef10_filter_push(efx, &new_spec, |
| 4280 | &table->entry[filter_idx].handle, |
| 4281 | true); |
| 4282 | |
| 4283 | spin_lock_bh(&efx->filter_lock); |
| 4284 | if (rc == 0) |
| 4285 | *spec = new_spec; |
| 4286 | } else { |
| 4287 | /* Really remove the filter */ |
| 4288 | |
| 4289 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 4290 | efx_ef10_filter_is_exclusive(spec) ? |
| 4291 | MC_CMD_FILTER_OP_IN_OP_REMOVE : |
| 4292 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); |
| 4293 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 4294 | table->entry[filter_idx].handle); |
| 4295 | rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, |
| 4296 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 4297 | |
| 4298 | spin_lock_bh(&efx->filter_lock); |
| 4299 | if (rc == 0) { |
| 4300 | kfree(spec); |
| 4301 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); |
| 4302 | } |
| 4303 | } |
Ben Hutchings | 7665d1a | 2013-11-21 19:02:18 +0000 | [diff] [blame] | 4304 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4305 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; |
| 4306 | wake_up_all(&table->waitq); |
| 4307 | out_unlock: |
| 4308 | spin_unlock_bh(&efx->filter_lock); |
| 4309 | finish_wait(&table->waitq, &wait); |
| 4310 | return rc; |
| 4311 | } |
| 4312 | |
| 4313 | static int efx_ef10_filter_remove_safe(struct efx_nic *efx, |
| 4314 | enum efx_filter_priority priority, |
| 4315 | u32 filter_id) |
| 4316 | { |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 4317 | return efx_ef10_filter_remove_internal(efx, 1U << priority, |
| 4318 | filter_id, false); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4319 | } |
| 4320 | |
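| | /* 'Unsafe' filter IDs carry only the table row; the match-priority
| | * component of the full ID is discarded.
| | */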
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4321 | static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) |
| 4322 | { |
| 4323 | return filter_id % HUNT_FILTER_TBL_ROWS; |
| 4324 | } |
| 4325 | |
Edward Cree | 8c91562 | 2016-06-15 17:49:05 +0100 | [diff] [blame] | 4326 | static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, |
| 4327 | enum efx_filter_priority priority, |
| 4328 | u32 filter_id) |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4329 | { |
Edward Cree | 8c91562 | 2016-06-15 17:49:05 +0100 | [diff] [blame] | 4330 | if (filter_id == EFX_EF10_FILTER_ID_INVALID) |
| 4331 | return; |
| 4332 | efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4333 | } |
| 4334 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4335 | static int efx_ef10_filter_get_safe(struct efx_nic *efx, |
| 4336 | enum efx_filter_priority priority, |
| 4337 | u32 filter_id, struct efx_filter_spec *spec) |
| 4338 | { |
| 4339 | unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS; |
| 4340 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4341 | const struct efx_filter_spec *saved_spec; |
| 4342 | int rc; |
| 4343 | |
| 4344 | spin_lock_bh(&efx->filter_lock); |
| 4345 | saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 4346 | if (saved_spec && saved_spec->priority == priority && |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4347 | efx_ef10_filter_pri(table, saved_spec) == |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4348 | filter_id / HUNT_FILTER_TBL_ROWS) { |
| 4349 | *spec = *saved_spec; |
| 4350 | rc = 0; |
| 4351 | } else { |
| 4352 | rc = -ENOENT; |
| 4353 | } |
| 4354 | spin_unlock_bh(&efx->filter_lock); |
| 4355 | return rc; |
| 4356 | } |
| 4357 | |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 4358 | static int efx_ef10_filter_clear_rx(struct efx_nic *efx, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4359 | enum efx_filter_priority priority) |
| 4360 | { |
Ben Hutchings | fbd7912 | 2013-11-21 19:15:03 +0000 | [diff] [blame] | 4361 | unsigned int priority_mask; |
| 4362 | unsigned int i; |
| 4363 | int rc; |
| 4364 | |
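| | /* Remove filters at all priorities up to and including 'priority',
| | * but never the automatic (AUTO) filters.
| | */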
| 4365 | priority_mask = (((1U << (priority + 1)) - 1) & |
| 4366 | ~(1U << EFX_FILTER_PRI_AUTO)); |
| 4367 | |
| 4368 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { |
| 4369 | rc = efx_ef10_filter_remove_internal(efx, priority_mask, |
| 4370 | i, true); |
| 4371 | if (rc && rc != -ENOENT) |
| 4372 | return rc; |
| 4373 | } |
| 4374 | |
| 4375 | return 0; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4376 | } |
| 4377 | |
| 4378 | static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, |
| 4379 | enum efx_filter_priority priority) |
| 4380 | { |
| 4381 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4382 | unsigned int filter_idx; |
| 4383 | s32 count = 0; |
| 4384 | |
| 4385 | spin_lock_bh(&efx->filter_lock); |
| 4386 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 4387 | if (table->entry[filter_idx].spec && |
| 4388 | efx_ef10_filter_entry_spec(table, filter_idx)->priority == |
| 4389 | priority) |
| 4390 | ++count; |
| 4391 | } |
| 4392 | spin_unlock_bh(&efx->filter_lock); |
| 4393 | return count; |
| 4394 | } |
| 4395 | |
| 4396 | static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) |
| 4397 | { |
| 4398 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4399 | |
| 4400 | return table->rx_match_count * HUNT_FILTER_TBL_ROWS; |
| 4401 | } |
| 4402 | |
| 4403 | static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, |
| 4404 | enum efx_filter_priority priority, |
| 4405 | u32 *buf, u32 size) |
| 4406 | { |
| 4407 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4408 | struct efx_filter_spec *spec; |
| 4409 | unsigned int filter_idx; |
| 4410 | s32 count = 0; |
| 4411 | |
| 4412 | spin_lock_bh(&efx->filter_lock); |
| 4413 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 4414 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 4415 | if (spec && spec->priority == priority) { |
| 4416 | if (count == size) { |
| 4417 | count = -EMSGSIZE; |
| 4418 | break; |
| 4419 | } |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4420 | buf[count++] = (efx_ef10_filter_pri(table, spec) * |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4421 | HUNT_FILTER_TBL_ROWS + |
| 4422 | filter_idx); |
| 4423 | } |
| 4424 | } |
| 4425 | spin_unlock_bh(&efx->filter_lock); |
| 4426 | return count; |
| 4427 | } |
| 4428 | |
| 4429 | #ifdef CONFIG_RFS_ACCEL |
| 4430 | |
| 4431 | static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; |
| 4432 | |
| 4433 | static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, |
| 4434 | struct efx_filter_spec *spec) |
| 4435 | { |
| 4436 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4437 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); |
| 4438 | struct efx_filter_spec *saved_spec; |
| 4439 | unsigned int hash, i, depth = 1; |
| 4440 | bool replacing = false; |
| 4441 | int ins_index = -1; |
| 4442 | u64 cookie; |
| 4443 | s32 rc; |
| 4444 | |
| 4445 | /* Must be an RX filter without RSS and not for a multicast |
| 4446 | * destination address (RFS only works for connected sockets). |
| 4447 | * These restrictions allow us to pass only a tiny amount of |
| 4448 | * data through to the completion function. |
| 4449 | */ |
| 4450 | EFX_WARN_ON_PARANOID(spec->flags != |
| 4451 | (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); |
| 4452 | EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); |
| 4453 | EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); |
| 4454 | |
| 4455 | hash = efx_ef10_filter_hash(spec); |
| 4456 | |
| 4457 | spin_lock_bh(&efx->filter_lock); |
| 4458 | |
| 4459 | /* Find any existing filter with the same match tuple or else |
| 4460 | * a free slot to insert at. If an existing filter is busy, |
| 4461 | * we have to give up. |
| 4462 | */ |
| 4463 | for (;;) { |
| 4464 | i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); |
| 4465 | saved_spec = efx_ef10_filter_entry_spec(table, i); |
| 4466 | |
| 4467 | if (!saved_spec) { |
| 4468 | if (ins_index < 0) |
| 4469 | ins_index = i; |
| 4470 | } else if (efx_ef10_filter_equal(spec, saved_spec)) { |
| 4471 | if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { |
| 4472 | rc = -EBUSY; |
| 4473 | goto fail_unlock; |
| 4474 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4475 | if (spec->priority < saved_spec->priority) { |
| 4476 | rc = -EPERM; |
| 4477 | goto fail_unlock; |
| 4478 | } |
| 4479 | ins_index = i; |
| 4480 | break; |
| 4481 | } |
| 4482 | |
| 4483 | /* Once we reach the maximum search depth, use the |
| 4484 | * first suitable slot or return -EBUSY if there was |
| 4485 | * none |
| 4486 | */ |
| 4487 | if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { |
| 4488 | if (ins_index < 0) { |
| 4489 | rc = -EBUSY; |
| 4490 | goto fail_unlock; |
| 4491 | } |
| 4492 | break; |
| 4493 | } |
| 4494 | |
| 4495 | ++depth; |
| 4496 | } |
| 4497 | |
| 4498 | /* Create a software table entry if necessary, and mark it |
| 4499 | * busy. We might yet fail to insert, but any attempt to |
| 4500 | * insert a conflicting filter while we're waiting for the |
| 4501 | * firmware must find the busy entry. |
| 4502 | */ |
| 4503 | saved_spec = efx_ef10_filter_entry_spec(table, ins_index); |
| 4504 | if (saved_spec) { |
| 4505 | replacing = true; |
| 4506 | } else { |
| 4507 | saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); |
| 4508 | if (!saved_spec) { |
| 4509 | rc = -ENOMEM; |
| 4510 | goto fail_unlock; |
| 4511 | } |
| 4512 | *saved_spec = *spec; |
| 4513 | } |
| 4514 | efx_ef10_filter_set_entry(table, ins_index, saved_spec, |
| 4515 | EFX_EF10_FILTER_FLAG_BUSY); |
| 4516 | |
| 4517 | spin_unlock_bh(&efx->filter_lock); |
| 4518 | |
| 4519 | /* Pack up the variables needed on completion */ |
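| | /* bit 31 = replacing, bits 30:16 = insertion index, bits 15:0 = dmaq_id */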
| 4520 | cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; |
| 4521 | |
| 4522 | efx_ef10_filter_push_prep(efx, spec, inbuf, |
| 4523 | table->entry[ins_index].handle, replacing); |
| 4524 | efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), |
| 4525 | MC_CMD_FILTER_OP_OUT_LEN, |
| 4526 | efx_ef10_filter_rfs_insert_complete, cookie); |
| 4527 | |
| 4528 | return ins_index; |
| 4529 | |
| 4530 | fail_unlock: |
| 4531 | spin_unlock_bh(&efx->filter_lock); |
| 4532 | return rc; |
| 4533 | } |
| 4534 | |
| 4535 | static void |
| 4536 | efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, |
| 4537 | int rc, efx_dword_t *outbuf, |
| 4538 | size_t outlen_actual) |
| 4539 | { |
| 4540 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4541 | unsigned int ins_index, dmaq_id; |
| 4542 | struct efx_filter_spec *spec; |
| 4543 | bool replacing; |
| 4544 | |
| 4545 | /* Unpack the cookie */ |
| 4546 | replacing = cookie >> 31; |
| 4547 | ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); |
| 4548 | dmaq_id = cookie & 0xffff; |
| 4549 | |
| 4550 | spin_lock_bh(&efx->filter_lock); |
| 4551 | spec = efx_ef10_filter_entry_spec(table, ins_index); |
| 4552 | if (rc == 0) { |
| 4553 | table->entry[ins_index].handle = |
| 4554 | MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); |
| 4555 | if (replacing) |
| 4556 | spec->dmaq_id = dmaq_id; |
| 4557 | } else if (!replacing) { |
| 4558 | kfree(spec); |
| 4559 | spec = NULL; |
| 4560 | } |
| 4561 | efx_ef10_filter_set_entry(table, ins_index, spec, 0); |
| 4562 | spin_unlock_bh(&efx->filter_lock); |
| 4563 | |
| 4564 | wake_up_all(&table->waitq); |
| 4565 | } |
| 4566 | |
| 4567 | static void |
| 4568 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, |
| 4569 | unsigned long filter_idx, |
| 4570 | int rc, efx_dword_t *outbuf, |
| 4571 | size_t outlen_actual); |
| 4572 | |
| 4573 | static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, |
| 4574 | unsigned int filter_idx) |
| 4575 | { |
| 4576 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4577 | struct efx_filter_spec *spec = |
| 4578 | efx_ef10_filter_entry_spec(table, filter_idx); |
| 4579 | MCDI_DECLARE_BUF(inbuf, |
| 4580 | MC_CMD_FILTER_OP_IN_HANDLE_OFST + |
| 4581 | MC_CMD_FILTER_OP_IN_HANDLE_LEN); |
| 4582 | |
| 4583 | if (!spec || |
| 4584 | (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || |
| 4585 | spec->priority != EFX_FILTER_PRI_HINT || |
| 4586 | !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, |
| 4587 | flow_id, filter_idx)) |
| 4588 | return false; |
| 4589 | |
| 4590 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 4591 | MC_CMD_FILTER_OP_IN_OP_REMOVE); |
| 4592 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 4593 | table->entry[filter_idx].handle); |
| 4594 | if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, |
| 4595 | efx_ef10_filter_rfs_expire_complete, filter_idx)) |
| 4596 | return false; |
| 4597 | |
| 4598 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; |
| 4599 | return true; |
| 4600 | } |
| 4601 | |
| 4602 | static void |
| 4603 | efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, |
| 4604 | unsigned long filter_idx, |
| 4605 | int rc, efx_dword_t *outbuf, |
| 4606 | size_t outlen_actual) |
| 4607 | { |
| 4608 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4609 | struct efx_filter_spec *spec = |
| 4610 | efx_ef10_filter_entry_spec(table, filter_idx); |
| 4611 | |
| 4612 | spin_lock_bh(&efx->filter_lock); |
| 4613 | if (rc == 0) { |
| 4614 | kfree(spec); |
| 4615 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); |
| 4616 | } |
| 4617 | table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; |
| 4618 | wake_up_all(&table->waitq); |
| 4619 | spin_unlock_bh(&efx->filter_lock); |
| 4620 | } |
| 4621 | |
| 4622 | #endif /* CONFIG_RFS_ACCEL */ |
| 4623 | |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4624 | static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4625 | { |
| 4626 | int match_flags = 0; |
| 4627 | |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4628 | #define MAP_FLAG(gen_flag, mcdi_field) do { \ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4629 | u32 old_mcdi_flags = mcdi_flags; \ |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4630 | mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ |
| 4631 | mcdi_field ## _LBN); \ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4632 | if (mcdi_flags != old_mcdi_flags) \ |
| 4633 | match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4634 | } while (0) |
| 4635 | |
| 4636 | if (encap) { |
| 4637 | /* encap filters must specify encap type */ |
| 4638 | match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; |
| 4639 | /* and imply ethertype and ip proto */ |
| 4640 | mcdi_flags &= |
| 4641 | ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); |
| 4642 | mcdi_flags &= |
| 4643 | ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); |
| 4644 | /* VLAN tags refer to the outer packet */ |
| 4645 | MAP_FLAG(INNER_VID, INNER_VLAN); |
| 4646 | MAP_FLAG(OUTER_VID, OUTER_VLAN); |
| 4647 | /* everything else refers to the inner packet */ |
| 4648 | MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); |
| 4649 | MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); |
| 4650 | MAP_FLAG(REM_HOST, IFRM_SRC_IP); |
| 4651 | MAP_FLAG(LOC_HOST, IFRM_DST_IP); |
| 4652 | MAP_FLAG(REM_MAC, IFRM_SRC_MAC); |
| 4653 | MAP_FLAG(REM_PORT, IFRM_SRC_PORT); |
| 4654 | MAP_FLAG(LOC_MAC, IFRM_DST_MAC); |
| 4655 | MAP_FLAG(LOC_PORT, IFRM_DST_PORT); |
| 4656 | MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); |
| 4657 | MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); |
| 4658 | } else { |
| 4659 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); |
| 4660 | MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); |
| 4661 | MAP_FLAG(REM_HOST, SRC_IP); |
| 4662 | MAP_FLAG(LOC_HOST, DST_IP); |
| 4663 | MAP_FLAG(REM_MAC, SRC_MAC); |
| 4664 | MAP_FLAG(REM_PORT, SRC_PORT); |
| 4665 | MAP_FLAG(LOC_MAC, DST_MAC); |
| 4666 | MAP_FLAG(LOC_PORT, DST_PORT); |
| 4667 | MAP_FLAG(ETHER_TYPE, ETHER_TYPE); |
| 4668 | MAP_FLAG(INNER_VID, INNER_VLAN); |
| 4669 | MAP_FLAG(OUTER_VID, OUTER_VLAN); |
| 4670 | MAP_FLAG(IP_PROTO, IP_PROTO); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4671 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4672 | #undef MAP_FLAG |
| 4673 | |
| 4674 | /* Did we map them all? */ |
| 4675 | if (mcdi_flags) |
| 4676 | return -EINVAL; |
| 4677 | |
| 4678 | return match_flags; |
| 4679 | } |
| 4680 | |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 4681 | static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) |
| 4682 | { |
| 4683 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4684 | struct efx_ef10_filter_vlan *vlan, *next_vlan; |
| 4685 | |
| 4686 | /* See comment in efx_ef10_filter_table_remove() */ |
| 4687 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 4688 | return; |
| 4689 | |
| 4690 | if (!table) |
| 4691 | return; |
| 4692 | |
| 4693 | list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) |
| 4694 | efx_ef10_filter_del_vlan_internal(efx, vlan); |
| 4695 | } |
| 4696 | |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4697 | static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4698 | bool encap, |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4699 | enum efx_filter_match_flags match_flags) |
| 4700 | { |
| 4701 | unsigned int match_pri; |
| 4702 | int mf; |
| 4703 | |
| 4704 | for (match_pri = 0; |
| 4705 | match_pri < table->rx_match_count; |
| 4706 | match_pri++) { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4707 | mf = efx_ef10_filter_match_flags_from_mcdi(encap, |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4708 | table->rx_match_mcdi_flags[match_pri]); |
| 4709 | if (mf == match_flags) |
| 4710 | return true; |
| 4711 | } |
| 4712 | |
| 4713 | return false; |
| 4714 | } |
| 4715 | |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4716 | static int |
| 4717 | efx_ef10_filter_table_probe_matches(struct efx_nic *efx, |
| 4718 | struct efx_ef10_filter_table *table, |
| 4719 | bool encap) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4720 | { |
| 4721 | MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); |
| 4722 | MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); |
| 4723 | unsigned int pd_match_pri, pd_match_count; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4724 | size_t outlen; |
| 4725 | int rc; |
| 4726 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4727 | /* Find out which RX filter types are supported, and their priorities */ |
| 4728 | MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4729 | encap ? |
| 4730 | MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4731 | MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); |
| 4732 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, |
| 4733 | inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), |
| 4734 | &outlen); |
| 4735 | if (rc) |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4736 | return rc; |
| 4737 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4738 | pd_match_count = MCDI_VAR_ARRAY_LEN( |
| 4739 | outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4740 | |
| 4741 | for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { |
| 4742 | u32 mcdi_flags = |
| 4743 | MCDI_ARRAY_DWORD( |
| 4744 | outbuf, |
| 4745 | GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, |
| 4746 | pd_match_pri); |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4747 | rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4748 | if (rc < 0) { |
| 4749 | netif_dbg(efx, probe, efx->net_dev, |
| 4750 | "%s: fw flags %#x pri %u not supported in driver\n", |
| 4751 | __func__, mcdi_flags, pd_match_pri); |
| 4752 | } else { |
| 4753 | netif_dbg(efx, probe, efx->net_dev, |
| 4754 | "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", |
| 4755 | __func__, mcdi_flags, pd_match_pri, |
| 4756 | rc, table->rx_match_count); |
Andrew Rybchenko | 7ac0dd9 | 2016-06-15 17:49:30 +0100 | [diff] [blame] | 4757 | table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; |
| 4758 | table->rx_match_count++; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4759 | } |
| 4760 | } |
| 4761 | |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4762 | return 0; |
| 4763 | } |
| 4764 | |
| 4765 | static int efx_ef10_filter_table_probe(struct efx_nic *efx) |
| 4766 | { |
| 4767 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 4768 | struct net_device *net_dev = efx->net_dev; |
| 4769 | struct efx_ef10_filter_table *table; |
| 4770 | struct efx_ef10_vlan *vlan; |
| 4771 | int rc; |
| 4772 | |
| 4773 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 4774 | return -EINVAL; |
| 4775 | |
| 4776 | if (efx->filter_state) /* already probed */ |
| 4777 | return 0; |
| 4778 | |
| 4779 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 4780 | if (!table) |
| 4781 | return -ENOMEM; |
| 4782 | |
| 4783 | table->rx_match_count = 0; |
| 4784 | rc = efx_ef10_filter_table_probe_matches(efx, table, false); |
| 4785 | if (rc) |
| 4786 | goto fail; |
| 4787 | if (nic_data->datapath_caps & |
| 4788 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) |
| 4789 | rc = efx_ef10_filter_table_probe_matches(efx, table, true); |
| 4790 | if (rc) |
| 4791 | goto fail; |
Martin Habets | e4478ad | 2016-06-15 17:51:07 +0100 | [diff] [blame] | 4792 | if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4793 | !(efx_ef10_filter_match_supported(table, false, |
Martin Habets | e4478ad | 2016-06-15 17:51:07 +0100 | [diff] [blame] | 4794 | (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4795 | efx_ef10_filter_match_supported(table, false, |
Martin Habets | e4478ad | 2016-06-15 17:51:07 +0100 | [diff] [blame] | 4796 | (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { |
| 4797 | netif_info(efx, probe, net_dev, |
| 4798 | "VLAN filters are not supported in this firmware variant\n"); |
| 4799 | net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; |
| 4800 | efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; |
| 4801 | net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; |
| 4802 | } |
| 4803 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4804 | table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); |
| 4805 | if (!table->entry) { |
| 4806 | rc = -ENOMEM; |
| 4807 | goto fail; |
| 4808 | } |
| 4809 | |
Andrew Rybchenko | b071c3a | 2016-06-15 17:43:00 +0100 | [diff] [blame] | 4810 | table->mc_promisc_last = false; |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 4811 | table->vlan_filter = |
| 4812 | !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 4813 | INIT_LIST_HEAD(&table->vlan_list); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4814 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4815 | efx->filter_state = table; |
| 4816 | init_waitqueue_head(&table->waitq); |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 4817 | |
| 4818 | list_for_each_entry(vlan, &nic_data->vlan_list, list) { |
| 4819 | rc = efx_ef10_filter_add_vlan(efx, vlan->vid); |
| 4820 | if (rc) |
| 4821 | goto fail_add_vlan; |
| 4822 | } |
| 4823 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4824 | return 0; |
| 4825 | |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 4826 | fail_add_vlan: |
| 4827 | efx_ef10_filter_cleanup_vlans(efx); |
| 4828 | efx->filter_state = NULL; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4829 | fail: |
| 4830 | kfree(table); |
| 4831 | return rc; |
| 4832 | } |
| 4833 | |
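/* Re-insert the contents of the software filter table after the firmware
 * has forgotten them (nic_data->must_restore_filters).  Filters whose
 * match combination is no longer supported, or which fail to be
 * re-inserted, are dropped, and any per-VLAN default filter IDs referring
 * to them are marked invalid.
 */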
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 4834 | /* Caller must hold efx->filter_sem for read if race against |
| 4835 | * efx_ef10_filter_table_remove() is possible |
| 4836 | */ |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4837 | static void efx_ef10_filter_table_restore(struct efx_nic *efx) |
| 4838 | { |
| 4839 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4840 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4841 | unsigned int invalid_filters = 0, failed = 0; |
| 4842 | struct efx_ef10_filter_vlan *vlan; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4843 | struct efx_filter_spec *spec; |
| 4844 | unsigned int filter_idx; |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4845 | u32 mcdi_flags; |
| 4846 | int match_pri; |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4847 | int rc, i; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4848 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 4849 | WARN_ON(!rwsem_is_locked(&efx->filter_sem)); |
| 4850 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4851 | if (!nic_data->must_restore_filters) |
| 4852 | return; |
| 4853 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 4854 | if (!table) |
| 4855 | return; |
| 4856 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4857 | spin_lock_bh(&efx->filter_lock); |
| 4858 | |
| 4859 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 4860 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 4861 | if (!spec) |
| 4862 | continue; |
| 4863 | |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4864 | mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); |
| 4865 | match_pri = 0; |
| 4866 | while (match_pri < table->rx_match_count && |
| 4867 | table->rx_match_mcdi_flags[match_pri] != mcdi_flags) |
| 4868 | ++match_pri; |
| 4869 | if (match_pri >= table->rx_match_count) { |
| 4870 | invalid_filters++; |
| 4871 | goto not_restored; |
| 4872 | } |
| 4873 | if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && |
| 4874 | spec->rss_context != nic_data->rx_rss_context) |
| 4875 | netif_warn(efx, drv, efx->net_dev, |
| 4876 | "Warning: unable to restore a filter with a specific RSS context.\n");
| 4877 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4878 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; |
| 4879 | spin_unlock_bh(&efx->filter_lock); |
| 4880 | |
| 4881 | rc = efx_ef10_filter_push(efx, spec, |
| 4882 | &table->entry[filter_idx].handle, |
| 4883 | false); |
| 4884 | if (rc) |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4885 | failed++; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4886 | spin_lock_bh(&efx->filter_lock); |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4887 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4888 | if (rc) { |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4889 | not_restored: |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4890 | list_for_each_entry(vlan, &table->vlan_list, list) |
| 4891 | for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i) |
| 4892 | if (vlan->default_filters[i] == filter_idx) |
| 4893 | vlan->default_filters[i] = |
| 4894 | EFX_EF10_FILTER_ID_INVALID; |
| 4895 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4896 | kfree(spec); |
| 4897 | efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); |
| 4898 | } else { |
| 4899 | table->entry[filter_idx].spec &= |
| 4900 | ~EFX_EF10_FILTER_FLAG_BUSY; |
| 4901 | } |
| 4902 | } |
| 4903 | |
| 4904 | spin_unlock_bh(&efx->filter_lock); |
| 4905 | |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4906 | /* This can happen validly if the MC's capabilities have changed, so |
| 4907 | * is not an error. |
| 4908 | */ |
| 4909 | if (invalid_filters) |
| 4910 | netif_dbg(efx, drv, efx->net_dev, |
| 4911 | "Did not restore %u filters that are now unsupported.\n", |
| 4912 | invalid_filters); |
| 4913 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4914 | if (failed) |
| 4915 | netif_err(efx, hw, efx->net_dev, |
Jon Cooper | 2d3d4ec | 2017-01-27 15:02:11 +0000 | [diff] [blame] | 4916 | "unable to restore %u filters\n", failed); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4917 | else |
| 4918 | nic_data->must_restore_filters = false; |
| 4919 | } |
| 4920 | |
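/* Tear down the filter state: release the per-VLAN lists, detach
 * efx->filter_state, ask the MC to remove each exclusive filter (or
 * unsubscribe from each shared one), and free the software table.
 */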
| 4921 | static void efx_ef10_filter_table_remove(struct efx_nic *efx) |
| 4922 | { |
| 4923 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4924 | MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); |
| 4925 | struct efx_filter_spec *spec; |
| 4926 | unsigned int filter_idx; |
| 4927 | int rc; |
| 4928 | |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 4929 | efx_ef10_filter_cleanup_vlans(efx); |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 4930 | efx->filter_state = NULL; |
Edward Cree | dd98708 | 2016-06-15 17:43:43 +0100 | [diff] [blame] | 4931 | /* If we were called without locking, then it's not safe to free |
| 4932 | * the table as others might be using it. So we just WARN, leak |
| 4933 | * the memory, and potentially get an inconsistent filter table |
| 4934 | * state. |
| 4935 | * This should never actually happen. |
| 4936 | */ |
| 4937 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 4938 | return; |
| 4939 | |
Edward Cree | 0d32241 | 2015-05-20 11:10:03 +0100 | [diff] [blame] | 4940 | if (!table) |
| 4941 | return; |
| 4942 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4943 | for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { |
| 4944 | spec = efx_ef10_filter_entry_spec(table, filter_idx); |
| 4945 | if (!spec) |
| 4946 | continue; |
| 4947 | |
| 4948 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
| 4949 | efx_ef10_filter_is_exclusive(spec) ? |
| 4950 | MC_CMD_FILTER_OP_IN_OP_REMOVE : |
| 4951 | MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); |
| 4952 | MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, |
| 4953 | table->entry[filter_idx].handle); |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 4954 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, |
| 4955 | sizeof(inbuf), NULL, 0, NULL); |
Ben Hutchings | 48ce563 | 2013-11-01 16:42:44 +0000 | [diff] [blame] | 4956 | if (rc) |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 4957 | netif_info(efx, drv, efx->net_dev, |
| 4958 | "%s: filter %04x remove failed\n", |
| 4959 | __func__, filter_idx); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4960 | kfree(spec); |
| 4961 | } |
| 4962 | |
| 4963 | vfree(table->entry); |
| 4964 | kfree(table); |
| 4965 | } |
| 4966 | |
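/* Address-list and default filters are updated make-before-break: on a
 * sync the IDs currently held in each per-VLAN structure are first marked
 * EFX_EF10_FILTER_FLAG_AUTO_OLD here, the new filters are then inserted,
 * and efx_ef10_filter_remove_old() finally removes whatever is still
 * marked old.
 */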
Andrew Rybchenko | 6a37958 | 2016-06-15 17:44:20 +0100 | [diff] [blame] | 4967 | static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) |
| 4968 | { |
| 4969 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4970 | unsigned int filter_idx; |
| 4971 | |
| 4972 | if (*id != EFX_EF10_FILTER_ID_INVALID) { |
| 4973 | filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id); |
| 4974 | if (!table->entry[filter_idx].spec) |
| 4975 | netif_dbg(efx, drv, efx->net_dev, |
| 4976 | "marked null spec old %04x:%04x\n", *id, |
| 4977 | filter_idx); |
| 4978 | table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; |
| 4979 | *id = EFX_EF10_FILTER_ID_INVALID; |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 4980 | } |
Andrew Rybchenko | 6a37958 | 2016-06-15 17:44:20 +0100 | [diff] [blame] | 4981 | } |
| 4982 | |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 4983 | /* Mark old per-VLAN filters that may need to be removed */ |
| 4984 | static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, |
| 4985 | struct efx_ef10_filter_vlan *vlan) |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4986 | { |
| 4987 | struct efx_ef10_filter_table *table = efx->filter_state; |
Andrew Rybchenko | 6a37958 | 2016-06-15 17:44:20 +0100 | [diff] [blame] | 4988 | unsigned int i; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 4989 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4990 | for (i = 0; i < table->dev_uc_count; i++) |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 4991 | efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 4992 | for (i = 0; i < table->dev_mc_count; i++) |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 4993 | efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 4994 | for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) |
| 4995 | efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 4996 | } |
| 4997 | |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 4998 | /* Mark old filters that may need to be removed. |
| 4999 | * Caller must hold efx->filter_sem for read if race against |
| 5000 | * efx_ef10_filter_table_remove() is possible |
| 5001 | */ |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5002 | static void efx_ef10_filter_mark_old(struct efx_nic *efx) |
| 5003 | { |
| 5004 | struct efx_ef10_filter_table *table = efx->filter_state; |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5005 | struct efx_ef10_filter_vlan *vlan; |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5006 | |
| 5007 | spin_lock_bh(&efx->filter_lock); |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5008 | list_for_each_entry(vlan, &table->vlan_list, list) |
| 5009 | _efx_ef10_filter_vlan_mark_old(efx, vlan); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5010 | spin_unlock_bh(&efx->filter_lock); |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5011 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5012 | |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5013 | static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5014 | { |
| 5015 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 5016 | struct net_device *net_dev = efx->net_dev; |
| 5017 | struct netdev_hw_addr *uc; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5018 | int addr_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5019 | unsigned int i; |
| 5020 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5021 | addr_count = netdev_uc_count(net_dev); |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5022 | table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5023 | table->dev_uc_count = 1 + addr_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5024 | ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); |
| 5025 | i = 1; |
| 5026 | netdev_for_each_uc_addr(uc, net_dev) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5027 | if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5028 | table->uc_promisc = true; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5029 | break; |
| 5030 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5031 | ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); |
| 5032 | i++; |
| 5033 | } |
| 5034 | } |
| 5035 | |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5036 | static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5037 | { |
| 5038 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 5039 | struct net_device *net_dev = efx->net_dev; |
| 5040 | struct netdev_hw_addr *mc; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 5041 | unsigned int i, addr_count; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5042 | |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5043 | table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5044 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5045 | addr_count = netdev_mc_count(net_dev); |
| 5046 | i = 0; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 5047 | netdev_for_each_mc_addr(mc, net_dev) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5048 | if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5049 | table->mc_promisc = true; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5050 | break; |
| 5051 | } |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 5052 | ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); |
| 5053 | i++; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5054 | } |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5055 | |
| 5056 | table->dev_mc_count = i; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5057 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5058 | |
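/* Insert one MAC filter per entry in the device's unicast or multicast
 * address list for the given VLAN, storing the resulting IDs in vlan->uc[]
 * or vlan->mc[].  With @rollback set, any failure removes the IDs inserted
 * so far and returns the error so that the caller can fall back to a
 * mismatch filter; in that mode the multicast case also needs an explicit
 * Ethernet broadcast filter.
 */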
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5059 | static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5060 | struct efx_ef10_filter_vlan *vlan, |
| 5061 | bool multicast, bool rollback) |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5062 | { |
| 5063 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 5064 | struct efx_ef10_dev_addr *addr_list; |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 5065 | enum efx_filter_flags filter_flags; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5066 | struct efx_filter_spec spec; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5067 | u8 baddr[ETH_ALEN]; |
| 5068 | unsigned int i, j; |
| 5069 | int addr_count; |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 5070 | u16 *ids; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5071 | int rc; |
| 5072 | |
| 5073 | if (multicast) { |
| 5074 | addr_list = table->dev_mc_list; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5075 | addr_count = table->dev_mc_count; |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 5076 | ids = vlan->mc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5077 | } else { |
| 5078 | addr_list = table->dev_uc_list; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5079 | addr_count = table->dev_uc_count; |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 5080 | ids = vlan->uc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5081 | } |
| 5082 | |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 5083 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; |
| 5084 | |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5085 | /* Insert/renew filters */ |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5086 | for (i = 0; i < addr_count; i++) { |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 5087 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5088 | efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); |
Jon Cooper | b6f568e | 2015-07-21 15:10:15 +0100 | [diff] [blame] | 5089 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| 5090 | if (rc < 0) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5091 | if (rollback) { |
| 5092 | netif_info(efx, drv, efx->net_dev, |
| 5093 | "efx_ef10_filter_insert failed rc=%d\n", |
| 5094 | rc); |
| 5095 | /* Fall back to promiscuous */ |
| 5096 | for (j = 0; j < i; j++) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5097 | efx_ef10_filter_remove_unsafe( |
| 5098 | efx, EFX_FILTER_PRI_AUTO, |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 5099 | ids[j]); |
| 5100 | ids[j] = EFX_EF10_FILTER_ID_INVALID; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5101 | } |
| 5102 | return rc; |
| 5103 | } else { |
| 5104 | /* mark as not inserted, and carry on */ |
| 5105 | rc = EFX_EF10_FILTER_ID_INVALID; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5106 | } |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5107 | } |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 5108 | ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5109 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5110 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5111 | if (multicast && rollback) { |
| 5112 | /* Also need an Ethernet broadcast filter */ |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5113 | EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != |
| 5114 | EFX_EF10_FILTER_ID_INVALID); |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 5115 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5116 | eth_broadcast_addr(baddr); |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5117 | efx_filter_set_eth_local(&spec, vlan->vid, baddr); |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5118 | rc = efx_ef10_filter_insert(efx, &spec, true); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5119 | if (rc < 0) { |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5120 | netif_warn(efx, drv, efx->net_dev, |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5121 | "Broadcast filter insert failed rc=%d\n", rc); |
| 5122 | /* Fall back to promiscuous */ |
| 5123 | for (j = 0; j < i; j++) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5124 | efx_ef10_filter_remove_unsafe( |
| 5125 | efx, EFX_FILTER_PRI_AUTO, |
Andrew Rybchenko | dc3273e | 2016-06-15 17:45:36 +0100 | [diff] [blame] | 5126 | ids[j]); |
| 5127 | ids[j] = EFX_EF10_FILTER_ID_INVALID; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5128 | } |
| 5129 | return rc; |
| 5130 | } else { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5131 | vlan->default_filters[EFX_EF10_BCAST] = |
| 5132 | efx_ef10_filter_get_unsafe_id(efx, rc); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5133 | } |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5134 | } |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5135 | |
| 5136 | return 0; |
| 5137 | } |
| 5138 | |
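/* Insert a unicast or multicast mismatch ("default") filter, optionally
 * for one class of encapsulated traffic, and record its ID in the
 * vlan->default_filters[] slot selected by the encap-type maps below.
 * Encapsulated requests are skipped silently if the datapath does not
 * advertise VXLAN/NVGRE support; without workaround 26807 the plain
 * multicast case also inserts an Ethernet broadcast filter.
 */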
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5139 | static int efx_ef10_filter_insert_def(struct efx_nic *efx, |
| 5140 | struct efx_ef10_filter_vlan *vlan, |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5141 | enum efx_encap_type encap_type, |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5142 | bool multicast, bool rollback) |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5143 | { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5144 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 5145 | enum efx_filter_flags filter_flags; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5146 | struct efx_filter_spec spec; |
| 5147 | u8 baddr[ETH_ALEN]; |
| 5148 | int rc; |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5149 | u16 *id; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5150 | |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 5151 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; |
| 5152 | |
| 5153 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5154 | |
| 5155 | if (multicast) |
| 5156 | efx_filter_set_mc_def(&spec); |
| 5157 | else |
| 5158 | efx_filter_set_uc_def(&spec); |
| 5159 | |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5160 | if (encap_type) { |
| 5161 | if (nic_data->datapath_caps & |
| 5162 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) |
| 5163 | efx_filter_set_encap_type(&spec, encap_type); |
| 5164 | else |
| 5165 | /* don't insert encap filters on non-supporting |
| 5166 | * platforms. ID will be left as INVALID. |
| 5167 | */ |
| 5168 | return 0; |
| 5169 | } |
| 5170 | |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5171 | if (vlan->vid != EFX_FILTER_VID_UNSPEC) |
| 5172 | efx_filter_set_eth_local(&spec, vlan->vid, NULL); |
| 5173 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5174 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| 5175 | if (rc < 0) { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5176 | const char *um = multicast ? "Multicast" : "Unicast"; |
| 5177 | const char *encap_name = ""; |
| 5178 | const char *encap_ipv = ""; |
| 5179 | |
| 5180 | if ((encap_type & EFX_ENCAP_TYPES_MASK) == |
| 5181 | EFX_ENCAP_TYPE_VXLAN) |
| 5182 | encap_name = "VXLAN "; |
| 5183 | else if ((encap_type & EFX_ENCAP_TYPES_MASK) == |
| 5184 | EFX_ENCAP_TYPE_NVGRE) |
| 5185 | encap_name = "NVGRE "; |
| 5186 | else if ((encap_type & EFX_ENCAP_TYPES_MASK) == |
| 5187 | EFX_ENCAP_TYPE_GENEVE) |
| 5188 | encap_name = "GENEVE "; |
| 5189 | if (encap_type & EFX_ENCAP_FLAG_IPV6) |
| 5190 | encap_ipv = "IPv6 "; |
| 5191 | else if (encap_type) |
| 5192 | encap_ipv = "IPv4 "; |
| 5193 | |
| 5194 | /* unprivileged functions can't insert mismatch filters |
| 5195 | * for encapsulated or unicast traffic, so downgrade |
| 5196 | * those warnings to debug. |
| 5197 | */ |
Jon Cooper | 34e7aef | 2017-01-27 15:02:39 +0000 | [diff] [blame] | 5198 | netif_cond_dbg(efx, drv, efx->net_dev, |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5199 | rc == -EPERM && (encap_type || !multicast), warn, |
| 5200 | "%s%s%s mismatch filter insert failed rc=%d\n", |
| 5201 | encap_name, encap_ipv, um, rc); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5202 | } else if (multicast) { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5203 | /* mapping from encap types to default filter IDs (multicast) */ |
| 5204 | static enum efx_ef10_default_filters map[] = { |
| 5205 | [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, |
| 5206 | [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, |
| 5207 | [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, |
| 5208 | [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, |
| 5209 | [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = |
| 5210 | EFX_EF10_VXLAN6_MCDEF, |
| 5211 | [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = |
| 5212 | EFX_EF10_NVGRE6_MCDEF, |
| 5213 | [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = |
| 5214 | EFX_EF10_GENEVE6_MCDEF, |
| 5215 | }; |
| 5216 | |
| 5217 | /* quick bounds check (BCAST result impossible) */ |
| 5218 | BUILD_BUG_ON(EFX_EF10_BCAST != 0); |
Colin Ian King | e990499 | 2017-01-31 16:30:02 +0000 | [diff] [blame] | 5219 | if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5220 | WARN_ON(1); |
| 5221 | return -EINVAL; |
| 5222 | } |
| 5223 | /* then follow map */ |
| 5224 | id = &vlan->default_filters[map[encap_type]]; |
| 5225 | |
| 5226 | EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); |
| 5227 | *id = efx_ef10_filter_get_unsafe_id(efx, rc); |
| 5228 | if (!nic_data->workaround_26807 && !encap_type) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5229 | /* Also need an Ethernet broadcast filter */ |
| 5230 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, |
Bert Kenward | f1c2ef4 | 2015-12-11 09:39:32 +0000 | [diff] [blame] | 5231 | filter_flags, 0); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5232 | eth_broadcast_addr(baddr); |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5233 | efx_filter_set_eth_local(&spec, vlan->vid, baddr); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5234 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| 5235 | if (rc < 0) { |
| 5236 | netif_warn(efx, drv, efx->net_dev, |
| 5237 | "Broadcast filter insert failed rc=%d\n", |
| 5238 | rc); |
| 5239 | if (rollback) { |
| 5240 | /* Roll back the mc_def filter */ |
| 5241 | efx_ef10_filter_remove_unsafe( |
| 5242 | efx, EFX_FILTER_PRI_AUTO, |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5243 | *id); |
| 5244 | *id = EFX_EF10_FILTER_ID_INVALID; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5245 | return rc; |
| 5246 | } |
| 5247 | } else { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5248 | EFX_WARN_ON_PARANOID( |
| 5249 | vlan->default_filters[EFX_EF10_BCAST] != |
| 5250 | EFX_EF10_FILTER_ID_INVALID); |
| 5251 | vlan->default_filters[EFX_EF10_BCAST] = |
| 5252 | efx_ef10_filter_get_unsafe_id(efx, rc); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5253 | } |
| 5254 | } |
| 5255 | rc = 0; |
| 5256 | } else { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5257 | /* mapping from encap types to default filter IDs (unicast) */ |
| 5258 | static enum efx_ef10_default_filters map[] = { |
| 5259 | [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, |
| 5260 | [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, |
| 5261 | [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, |
| 5262 | [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, |
| 5263 | [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = |
| 5264 | EFX_EF10_VXLAN6_UCDEF, |
| 5265 | [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = |
| 5266 | EFX_EF10_NVGRE6_UCDEF, |
| 5267 | [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = |
| 5268 | EFX_EF10_GENEVE6_UCDEF, |
| 5269 | }; |
| 5270 | |
| 5271 | /* quick bounds check (BCAST result impossible) */ |
| 5272 | BUILD_BUG_ON(EFX_EF10_BCAST != 0); |
Dan Carpenter | ee467fb | 2017-02-07 10:44:31 +0300 | [diff] [blame] | 5273 | if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5274 | WARN_ON(1); |
| 5275 | return -EINVAL; |
| 5276 | } |
| 5277 | /* then follow map */ |
| 5278 | id = &vlan->default_filters[map[encap_type]]; |
| 5279 | EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); |
| 5280 | *id = rc; |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5281 | rc = 0; |
| 5282 | } |
| 5283 | return rc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5284 | } |
| 5285 | |
| 5286 | /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD |
| 5287 | * flag or removes these filters, we don't need to hold the filter_lock while |
| 5288 | * scanning for these filters. |
| 5289 | */ |
| 5290 | static void efx_ef10_filter_remove_old(struct efx_nic *efx) |
| 5291 | { |
| 5292 | struct efx_ef10_filter_table *table = efx->filter_state; |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 5293 | int remove_failed = 0; |
| 5294 | int remove_noent = 0; |
| 5295 | int rc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5296 | int i; |
| 5297 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5298 | for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { |
| 5299 | if (ACCESS_ONCE(table->entry[i].spec) & |
Ben Hutchings | b59e6ef | 2013-11-21 19:02:22 +0000 | [diff] [blame] | 5300 | EFX_EF10_FILTER_FLAG_AUTO_OLD) { |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 5301 | rc = efx_ef10_filter_remove_internal(efx, |
| 5302 | 1U << EFX_FILTER_PRI_AUTO, i, true); |
| 5303 | if (rc == -ENOENT) |
| 5304 | remove_noent++; |
| 5305 | else if (rc) |
| 5306 | remove_failed++; |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5307 | } |
| 5308 | } |
Bert Kenward | e65a510 | 2015-12-23 08:57:36 +0000 | [diff] [blame] | 5309 | |
| 5310 | if (remove_failed) |
| 5311 | netif_info(efx, drv, efx->net_dev, |
| 5312 | "%s: failed to remove %d filters\n", |
| 5313 | __func__, remove_failed); |
| 5314 | if (remove_noent) |
| 5315 | netif_info(efx, drv, efx->net_dev, |
| 5316 | "%s: failed to remove %d non-existent filters\n", |
| 5317 | __func__, remove_noent); |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5318 | } |
| 5319 | |
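/* Fallback path for changing the MAC address when the firmware does not
 * support MC_CMD_VADAPTOR_SET_MAC: stop the port, remove the filter table,
 * free the vadaptor, swap the MAC on the PF-created vport, then restore
 * the vadaptor, filter table and port.  If the restore fails, a datapath
 * reset is scheduled.
 */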
Daniel Pieczko | 7a186f4 | 2015-07-07 11:37:19 +0100 | [diff] [blame] | 5320 | static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) |
| 5321 | { |
| 5322 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 5323 | u8 mac_old[ETH_ALEN]; |
| 5324 | int rc, rc2; |
| 5325 | |
| 5326 | /* Only reconfigure a PF-created vport */ |
| 5327 | if (is_zero_ether_addr(nic_data->vport_mac)) |
| 5328 | return 0; |
| 5329 | |
| 5330 | efx_device_detach_sync(efx); |
| 5331 | efx_net_stop(efx->net_dev); |
| 5332 | down_write(&efx->filter_sem); |
| 5333 | efx_ef10_filter_table_remove(efx); |
| 5334 | up_write(&efx->filter_sem); |
| 5335 | |
| 5336 | rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); |
| 5337 | if (rc) |
| 5338 | goto restore_filters; |
| 5339 | |
| 5340 | ether_addr_copy(mac_old, nic_data->vport_mac); |
| 5341 | rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, |
| 5342 | nic_data->vport_mac); |
| 5343 | if (rc) |
| 5344 | goto restore_vadaptor; |
| 5345 | |
| 5346 | rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, |
| 5347 | efx->net_dev->dev_addr); |
| 5348 | if (!rc) { |
| 5349 | ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); |
| 5350 | } else { |
| 5351 | rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); |
| 5352 | if (rc2) { |
| 5353 | /* Failed to add original MAC, so clear vport_mac */ |
| 5354 | eth_zero_addr(nic_data->vport_mac); |
| 5355 | goto reset_nic; |
| 5356 | } |
| 5357 | } |
| 5358 | |
| 5359 | restore_vadaptor: |
| 5360 | rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); |
| 5361 | if (rc2) |
| 5362 | goto reset_nic; |
| 5363 | restore_filters: |
| 5364 | down_write(&efx->filter_sem); |
| 5365 | rc2 = efx_ef10_filter_table_probe(efx); |
| 5366 | up_write(&efx->filter_sem); |
| 5367 | if (rc2) |
| 5368 | goto reset_nic; |
| 5369 | |
| 5370 | rc2 = efx_net_open(efx->net_dev); |
| 5371 | if (rc2) |
| 5372 | goto reset_nic; |
| 5373 | |
| 5374 | netif_device_attach(efx->net_dev); |
| 5375 | |
| 5376 | return rc; |
| 5377 | |
| 5378 | reset_nic: |
| 5379 | netif_err(efx, drv, efx->net_dev, |
| 5380 | "Failed to restore when changing MAC address - scheduling reset\n"); |
| 5381 | efx_schedule_reset(efx, RESET_TYPE_DATAPATH); |
| 5382 | |
| 5383 | return rc ? rc : rc2; |
| 5384 | } |
| 5385 | |
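/* Bring the installed filters for one VLAN in line with the current
 * address lists: individual unicast filters (or the unicast mismatch
 * filter when promiscuous), the encapsulated mismatch filters, and the
 * multicast set.  With workaround 26807 the old filters are removed before
 * a promiscuity change so that packets are dropped rather than duplicated.
 */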
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5386 | /* Caller must hold efx->filter_sem for read if race against |
| 5387 | * efx_ef10_filter_table_remove() is possible |
| 5388 | */ |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5389 | static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, |
| 5390 | struct efx_ef10_filter_vlan *vlan) |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5391 | { |
| 5392 | struct efx_ef10_filter_table *table = efx->filter_state; |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 5393 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5394 | |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 5395 | /* Do not install the unspecified-VID entry if VLAN filtering is enabled.
| 5396 | * Do not install entries for specific VIDs if VLAN filtering is disabled.
| 5397 | */
| 5398 | if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) |
| 5399 | return; |
| 5400 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5401 | /* Insert/renew unicast filters */ |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5402 | if (table->uc_promisc) { |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5403 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, |
| 5404 | false, false); |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5405 | efx_ef10_filter_insert_addr_list(efx, vlan, false, false); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5406 | } else { |
| 5407 | /* If any of the filters failed to insert, fall back to |
| 5408 | * promiscuous mode - add in the uc_def filter. But keep |
| 5409 | * our individual unicast filters. |
| 5410 | */ |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5411 | if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5412 | efx_ef10_filter_insert_def(efx, vlan, |
| 5413 | EFX_ENCAP_TYPE_NONE, |
| 5414 | false, false); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5415 | } |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5416 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, |
| 5417 | false, false); |
| 5418 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | |
| 5419 | EFX_ENCAP_FLAG_IPV6, |
| 5420 | false, false); |
| 5421 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, |
| 5422 | false, false); |
| 5423 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | |
| 5424 | EFX_ENCAP_FLAG_IPV6, |
| 5425 | false, false); |
| 5426 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, |
| 5427 | false, false); |
| 5428 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | |
| 5429 | EFX_ENCAP_FLAG_IPV6, |
| 5430 | false, false); |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 5431 | |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5432 | /* Insert/renew multicast filters */ |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 5433 | /* If changing promiscuous state with cascaded multicast filters, remove |
| 5434 | * old filters first, so that packets are dropped rather than duplicated |
| 5435 | */ |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5436 | if (nic_data->workaround_26807 && |
| 5437 | table->mc_promisc_last != table->mc_promisc) |
Daniel Pieczko | ab8b1f7c | 2015-07-21 15:10:44 +0100 | [diff] [blame] | 5438 | efx_ef10_filter_remove_old(efx); |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5439 | if (table->mc_promisc) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5440 | if (nic_data->workaround_26807) { |
| 5441 | /* If we failed to insert promiscuous filters, roll back
| 5442 | * and fall back to individual multicast filters
| 5443 | */
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5444 | if (efx_ef10_filter_insert_def(efx, vlan, |
| 5445 | EFX_ENCAP_TYPE_NONE, |
| 5446 | true, true)) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5447 | /* Changing promisc state, so remove old filters */ |
| 5448 | efx_ef10_filter_remove_old(efx); |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5449 | efx_ef10_filter_insert_addr_list(efx, vlan, |
| 5450 | true, false); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5451 | } |
| 5452 | } else { |
| 5453 | /* If we failed to insert promiscuous filters, don't
| 5454 | * roll back. Regardless, also insert the mc_list
| 5455 | */
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5456 | efx_ef10_filter_insert_def(efx, vlan, |
| 5457 | EFX_ENCAP_TYPE_NONE, |
| 5458 | true, false); |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5459 | efx_ef10_filter_insert_addr_list(efx, vlan, true, false); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5460 | } |
| 5461 | } else { |
| 5462 | /* If any filters failed to insert, rollback and fall back to |
| 5463 | * promiscuous mode - mc_def filter and maybe broadcast. If |
| 5464 | * that fails, roll back again and insert as many of our |
| 5465 | * individual multicast filters as we can. |
| 5466 | */ |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5467 | if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) { |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5468 | /* Changing promisc state, so remove old filters */ |
| 5469 | if (nic_data->workaround_26807) |
| 5470 | efx_ef10_filter_remove_old(efx); |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5471 | if (efx_ef10_filter_insert_def(efx, vlan, |
| 5472 | EFX_ENCAP_TYPE_NONE, |
| 5473 | true, true)) |
Andrew Rybchenko | b3a3c03 | 2016-06-15 17:47:36 +0100 | [diff] [blame] | 5474 | efx_ef10_filter_insert_addr_list(efx, vlan, |
| 5475 | true, false); |
Edward Cree | 12fb0da | 2015-07-21 15:11:00 +0100 | [diff] [blame] | 5476 | } |
| 5477 | } |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5478 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, |
| 5479 | true, false); |
| 5480 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | |
| 5481 | EFX_ENCAP_FLAG_IPV6, |
| 5482 | true, false); |
| 5483 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, |
| 5484 | true, false); |
| 5485 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | |
| 5486 | EFX_ENCAP_FLAG_IPV6, |
| 5487 | true, false); |
| 5488 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, |
| 5489 | true, false); |
| 5490 | efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | |
| 5491 | EFX_ENCAP_FLAG_IPV6, |
| 5492 | true, false); |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5493 | } |
| 5494 | |
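/* Top-level RX mode sync: mark the existing automatic filters old, take a
 * snapshot of the device's unicast and multicast address lists under
 * netif_addr_lock, flush everything early if the VLAN filtering setting
 * has changed, resync each VLAN on the list, then remove whatever is still
 * marked old and remember the multicast promiscuity state.
 */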
| 5495 | /* Caller must hold efx->filter_sem for read if race against |
| 5496 | * efx_ef10_filter_table_remove() is possible |
| 5497 | */ |
| 5498 | static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) |
| 5499 | { |
| 5500 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 5501 | struct net_device *net_dev = efx->net_dev; |
| 5502 | struct efx_ef10_filter_vlan *vlan; |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 5503 | bool vlan_filter; |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5504 | |
| 5505 | if (!efx_dev_registered(efx)) |
| 5506 | return; |
| 5507 | |
| 5508 | if (!table) |
| 5509 | return; |
| 5510 | |
| 5511 | efx_ef10_filter_mark_old(efx); |
| 5512 | |
| 5513 | /* Copy/convert the address lists; add the primary station |
| 5514 | * address and broadcast address |
| 5515 | */ |
| 5516 | netif_addr_lock_bh(net_dev); |
| 5517 | efx_ef10_filter_uc_addr_list(efx); |
| 5518 | efx_ef10_filter_mc_addr_list(efx); |
| 5519 | netif_addr_unlock_bh(net_dev); |
| 5520 | |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 5521 | /* If the VLAN filtering setting has changed, the old filters will all be
| 5522 | * removed anyway; remove them now, in advance, to avoid conflicts between
| 5523 | * unicast untagged and VLAN 0 tagged filters.
| 5524 | */
| 5525 | vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); |
| 5526 | if (table->vlan_filter != vlan_filter) { |
| 5527 | table->vlan_filter = vlan_filter; |
| 5528 | efx_ef10_filter_remove_old(efx); |
| 5529 | } |
| 5530 | |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5531 | list_for_each_entry(vlan, &table->vlan_list, list) |
| 5532 | efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5533 | |
| 5534 | efx_ef10_filter_remove_old(efx); |
Andrew Rybchenko | afa4ce1 | 2016-06-15 17:45:56 +0100 | [diff] [blame] | 5535 | table->mc_promisc_last = table->mc_promisc; |
Daniel Pieczko | 822b96f | 2015-07-21 15:10:27 +0100 | [diff] [blame] | 5536 | } |
| 5537 | |
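/* Per-VLAN filter state: each VID configured on the interface has an
 * efx_ef10_filter_vlan entry on table->vlan_list holding the IDs of its
 * unicast, multicast and default filters.  Adding a VLAN initialises all
 * IDs to invalid and, if the net device is registered, installs its
 * filters; deleting one removes whatever is still installed.
 */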
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5538 | static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid) |
| 5539 | { |
| 5540 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 5541 | struct efx_ef10_filter_vlan *vlan; |
| 5542 | |
| 5543 | WARN_ON(!rwsem_is_locked(&efx->filter_sem)); |
| 5544 | |
| 5545 | list_for_each_entry(vlan, &table->vlan_list, list) { |
| 5546 | if (vlan->vid == vid) |
| 5547 | return vlan; |
| 5548 | } |
| 5549 | |
| 5550 | return NULL; |
| 5551 | } |
| 5552 | |
| 5553 | static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) |
| 5554 | { |
| 5555 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 5556 | struct efx_ef10_filter_vlan *vlan; |
| 5557 | unsigned int i; |
| 5558 | |
| 5559 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 5560 | return -EINVAL; |
| 5561 | |
| 5562 | vlan = efx_ef10_filter_find_vlan(efx, vid); |
| 5563 | if (WARN_ON(vlan)) { |
| 5564 | netif_err(efx, drv, efx->net_dev, |
| 5565 | "VLAN %u already added\n", vid); |
| 5566 | return -EALREADY; |
| 5567 | } |
| 5568 | |
| 5569 | vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); |
| 5570 | if (!vlan) |
| 5571 | return -ENOMEM; |
| 5572 | |
| 5573 | vlan->vid = vid; |
| 5574 | |
| 5575 | for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) |
| 5576 | vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; |
| 5577 | for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) |
| 5578 | vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5579 | for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) |
| 5580 | vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5581 | |
| 5582 | list_add_tail(&vlan->list, &table->vlan_list); |
| 5583 | |
| 5584 | if (efx_dev_registered(efx)) |
| 5585 | efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); |
| 5586 | |
| 5587 | return 0; |
| 5588 | } |
| 5589 | |
| 5590 | static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, |
| 5591 | struct efx_ef10_filter_vlan *vlan) |
| 5592 | { |
| 5593 | unsigned int i; |
| 5594 | |
| 5595 | /* See comment in efx_ef10_filter_table_remove() */ |
| 5596 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 5597 | return; |
| 5598 | |
| 5599 | list_del(&vlan->list); |
| 5600 | |
Edward Cree | 8c91562 | 2016-06-15 17:49:05 +0100 | [diff] [blame] | 5601 | for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5602 | efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, |
Edward Cree | 8c91562 | 2016-06-15 17:49:05 +0100 | [diff] [blame] | 5603 | vlan->uc[i]); |
| 5604 | for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5605 | efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, |
Edward Cree | 8c91562 | 2016-06-15 17:49:05 +0100 | [diff] [blame] | 5606 | vlan->mc[i]); |
Edward Cree | 9b41080 | 2017-01-27 15:02:52 +0000 | [diff] [blame] | 5607 | for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) |
| 5608 | if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) |
| 5609 | efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, |
| 5610 | vlan->default_filters[i]); |
Andrew Rybchenko | 34813fe | 2016-06-15 17:48:14 +0100 | [diff] [blame] | 5611 | |
| 5612 | kfree(vlan); |
| 5613 | } |
| 5614 | |
| 5615 | static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid) |
| 5616 | { |
| 5617 | struct efx_ef10_filter_vlan *vlan; |
| 5618 | |
| 5619 | /* See comment in efx_ef10_filter_table_remove() */ |
| 5620 | if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) |
| 5621 | return; |
| 5622 | |
| 5623 | vlan = efx_ef10_filter_find_vlan(efx, vid); |
| 5624 | if (!vlan) { |
| 5625 | netif_err(efx, drv, efx->net_dev, |
| 5626 | "VLAN %u not found in filter state\n", vid); |
| 5627 | return; |
| 5628 | } |
| 5629 | |
| 5630 | efx_ef10_filter_del_vlan_internal(efx, vlan); |
| 5631 | } |
| 5632 | |
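/* Change the station MAC address with MC_CMD_VADAPTOR_SET_MAC, removing
 * and re-probing the filter table around the call.  A VF that gets EPERM
 * retries the change through its parent PF; a PF that gets ENOSYS falls
 * back to efx_ef10_vport_set_mac_address() for older firmware.
 */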
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5633 | static int efx_ef10_set_mac_address(struct efx_nic *efx) |
| 5634 | { |
| 5635 | MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); |
| 5636 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 5637 | bool was_enabled = efx->port_enabled; |
| 5638 | int rc; |
| 5639 | |
| 5640 | efx_device_detach_sync(efx); |
| 5641 | efx_net_stop(efx->net_dev); |
Martin Habets | d248953 | 2016-06-15 17:48:49 +0100 | [diff] [blame] | 5642 | |
| 5643 | mutex_lock(&efx->mac_lock); |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5644 | down_write(&efx->filter_sem); |
| 5645 | efx_ef10_filter_table_remove(efx); |
| 5646 | |
| 5647 | ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), |
| 5648 | efx->net_dev->dev_addr); |
| 5649 | MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, |
| 5650 | nic_data->vport_id); |
Daniel Pieczko | 535a617 | 2015-07-07 11:37:33 +0100 | [diff] [blame] | 5651 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, |
| 5652 | sizeof(inbuf), NULL, 0, NULL); |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5653 | |
| 5654 | efx_ef10_filter_table_probe(efx); |
| 5655 | up_write(&efx->filter_sem); |
Martin Habets | d248953 | 2016-06-15 17:48:49 +0100 | [diff] [blame] | 5656 | mutex_unlock(&efx->mac_lock); |
| 5657 | |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5658 | if (was_enabled) |
| 5659 | efx_net_open(efx->net_dev); |
| 5660 | netif_device_attach(efx->net_dev); |
| 5661 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5662 | #ifdef CONFIG_SFC_SRIOV |
| 5663 | if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5664 | struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; |
| 5665 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5666 | if (rc == -EPERM) { |
| 5667 | struct efx_nic *efx_pf; |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5668 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5669 | /* Switch to PF and change MAC address on vport */ |
| 5670 | efx_pf = pci_get_drvdata(pci_dev_pf); |
| 5671 | |
| 5672 | rc = efx_ef10_sriov_set_vf_mac(efx_pf, |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5673 | nic_data->vf_index, |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5674 | efx->net_dev->dev_addr); |
| 5675 | } else if (!rc) { |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5676 | struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); |
| 5677 | struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; |
| 5678 | unsigned int i; |
| 5679 | |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5680 | /* MAC address successfully changed by VF (with MAC |
| 5681 | * spoofing) so update the parent PF if possible. |
| 5682 | */ |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5683 | for (i = 0; i < efx_pf->vf_count; ++i) { |
| 5684 | struct ef10_vf *vf = nic_data->vf + i; |
| 5685 | |
| 5686 | if (vf->efx == efx) { |
| 5687 | ether_addr_copy(vf->mac, |
| 5688 | efx->net_dev->dev_addr); |
| 5689 | return 0; |
| 5690 | } |
| 5691 | } |
| 5692 | } |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5693 | } else |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5694 | #endif |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5695 | if (rc == -EPERM) { |
| 5696 | netif_err(efx, drv, efx->net_dev, |
| 5697 | "Cannot change MAC address; use sfboot to enable" |
| 5698 | " mac-spoofing on this interface\n"); |
Daniel Pieczko | 7a186f4 | 2015-07-07 11:37:19 +0100 | [diff] [blame] | 5699 | } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { |
| 5700 | /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC |
| 5701 | * fall-back to the method of changing the MAC address on the |
| 5702 | * vport. This only applies to PFs because such versions of |
| 5703 | * MCFW do not support VFs. |
| 5704 | */ |
| 5705 | rc = efx_ef10_vport_set_mac_address(efx); |
Daniel Pieczko | 535a617 | 2015-07-07 11:37:33 +0100 | [diff] [blame] | 5706 | } else { |
| 5707 | efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, |
| 5708 | sizeof(inbuf), NULL, 0, rc); |
Daniel Pieczko | 9e9f665 | 2015-07-07 11:37:00 +0100 | [diff] [blame] | 5709 | } |
| 5710 | |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 5711 | return rc; |
| 5712 | } |
| 5713 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5714 | static int efx_ef10_mac_reconfigure(struct efx_nic *efx) |
| 5715 | { |
| 5716 | efx_ef10_filter_sync_rx_mode(efx); |
| 5717 | |
| 5718 | return efx_mcdi_set_mac(efx); |
| 5719 | } |
| 5720 | |
Shradha Shah | 862f894 | 2015-05-20 11:08:56 +0100 | [diff] [blame] | 5721 | static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) |
| 5722 | { |
| 5723 | efx_ef10_filter_sync_rx_mode(efx); |
| 5724 | |
| 5725 | return 0; |
| 5726 | } |
| 5727 | |
Jon Cooper | 74cd60a | 2013-09-16 14:18:51 +0100 | [diff] [blame] | 5728 | static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) |
| 5729 | { |
| 5730 | MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); |
| 5731 | |
| 5732 | MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); |
| 5733 | return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), |
| 5734 | NULL, 0, NULL); |
| 5735 | } |
| 5736 | |
| 5737 | /* MC BISTs follow a different poll mechanism to phy BISTs. |
| 5738 | * The BIST is done in the poll handler on the MC, and the MCDI command |
| 5739 | * will block until the BIST is done. |
| 5740 | */ |
| 5741 | static int efx_ef10_poll_bist(struct efx_nic *efx) |
| 5742 | { |
| 5743 | int rc; |
| 5744 | MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); |
| 5745 | size_t outlen; |
| 5746 | u32 result; |
| 5747 | |
| 5748 | rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, |
| 5749 | outbuf, sizeof(outbuf), &outlen); |
| 5750 | if (rc != 0) |
| 5751 | return rc; |
| 5752 | |
| 5753 | if (outlen < MC_CMD_POLL_BIST_OUT_LEN) |
| 5754 | return -EIO; |
| 5755 | |
| 5756 | result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); |
| 5757 | switch (result) { |
| 5758 | case MC_CMD_POLL_BIST_PASSED: |
| 5759 | netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); |
| 5760 | return 0; |
| 5761 | case MC_CMD_POLL_BIST_TIMEOUT: |
| 5762 | netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); |
| 5763 | return -EIO; |
| 5764 | case MC_CMD_POLL_BIST_FAILED: |
| 5765 | netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); |
| 5766 | return -EIO; |
| 5767 | default: |
| 5768 | netif_err(efx, hw, efx->net_dev, |
| 5769 | "BIST returned unknown result %u\n", result);
| 5770 | return -EIO; |
| 5771 | } |
| 5772 | } |
| 5773 | |
| 5774 | static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) |
| 5775 | { |
| 5776 | int rc; |
| 5777 | |
| 5778 | netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); |
| 5779 | |
| 5780 | rc = efx_ef10_start_bist(efx, bist_type); |
| 5781 | if (rc != 0) |
| 5782 | return rc; |
| 5783 | |
| 5784 | return efx_ef10_poll_bist(efx); |
| 5785 | } |
| 5786 | |
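/* Offline self-test: take the datapath down, put the MC into offline BIST
 * mode, run the memory and register BISTs, then issue a WORLD reset to
 * return to normal operation.  An EPERM result (unprivileged function) is
 * not treated as a failure.
 */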
| 5787 | static int |
| 5788 | efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) |
| 5789 | { |
| 5790 | int rc, rc2; |
| 5791 | |
| 5792 | efx_reset_down(efx, RESET_TYPE_WORLD); |
| 5793 | |
| 5794 | rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, |
| 5795 | NULL, 0, NULL, 0, NULL); |
| 5796 | if (rc != 0) |
| 5797 | goto out; |
| 5798 | |
| 5799 | tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; |
| 5800 | tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; |
| 5801 | |
| 5802 | rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); |
| 5803 | |
| 5804 | out: |
Daniel Pieczko | 2732482 | 2015-07-31 11:14:54 +0100 | [diff] [blame] | 5805 | if (rc == -EPERM) |
| 5806 | rc = 0; |
Jon Cooper | 74cd60a | 2013-09-16 14:18:51 +0100 | [diff] [blame] | 5807 | rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); |
| 5808 | return rc ? rc : rc2; |
| 5809 | } |
| 5810 | |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5811 | #ifdef CONFIG_SFC_MTD |
| 5812 | |
| 5813 | struct efx_ef10_nvram_type_info { |
| 5814 | u16 type, type_mask; |
| 5815 | u8 port; |
| 5816 | const char *name; |
| 5817 | }; |
| 5818 | |
| 5819 | static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { |
| 5820 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, |
| 5821 | { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, |
| 5822 | { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, |
| 5823 | { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, |
| 5824 | { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, |
| 5825 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, |
| 5826 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, |
| 5827 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, |
| 5828 | { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, |
Ben Hutchings | a84f3bf9 | 2013-10-09 14:14:41 +0100 | [diff] [blame] | 5829 | { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 5830 | { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, |
| 5831 | }; |
| 5832 | |
| 5833 | static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, |
| 5834 | struct efx_mcdi_mtd_partition *part, |
| 5835 | unsigned int type) |
| 5836 | { |
| 5837 | MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); |
| 5838 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); |
| 5839 | const struct efx_ef10_nvram_type_info *info; |
| 5840 | size_t size, erase_size, outlen; |
| 5841 | bool protected; |
| 5842 | int rc; |
| 5843 | |
| 5844 | for (info = efx_ef10_nvram_types; ; info++) { |
| 5845 | if (info == |
| 5846 | efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) |
| 5847 | return -ENODEV; |
| 5848 | if ((type & ~info->type_mask) == info->type) |
| 5849 | break; |
| 5850 | } |
| 5851 | if (info->port != efx_port_num(efx)) |
| 5852 | return -ENODEV; |
| 5853 | |
| 5854 | rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); |
| 5855 | if (rc) |
| 5856 | return rc; |
| 5857 | if (protected) |
| 5858 | return -ENODEV; /* hide it */ |
| 5859 | |
| 5860 | part->nvram_type = type; |
| 5861 | |
| 5862 | MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); |
| 5863 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), |
| 5864 | outbuf, sizeof(outbuf), &outlen); |
| 5865 | if (rc) |
| 5866 | return rc; |
| 5867 | if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) |
| 5868 | return -EIO; |
| 5869 | if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & |
| 5870 | (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) |
| 5871 | part->fw_subtype = MCDI_DWORD(outbuf, |
| 5872 | NVRAM_METADATA_OUT_SUBTYPE); |
| 5873 | |
| 5874 | part->common.dev_type_name = "EF10 NVRAM manager"; |
| 5875 | part->common.type_name = info->name; |
| 5876 | |
| 5877 | part->common.mtd.type = MTD_NORFLASH; |
| 5878 | part->common.mtd.flags = MTD_CAP_NORFLASH; |
| 5879 | part->common.mtd.size = size; |
| 5880 | part->common.mtd.erasesize = erase_size; |
| 5881 | |
| 5882 | return 0; |
| 5883 | } |
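
/* A minimal standalone sketch of the (type & ~type_mask) == type test in the
 * loop above: a non-zero type_mask folds a whole range of partition types
 * onto one table entry, which is how the single "sfc_phy_fw" row covers every
 * per-PHY firmware partition.  The numeric values below are invented for the
 * example; the real constants come from the MCDI protocol definitions.
 */
#include <stdio.h>

#define EXAMPLE_PHY_MIN  0x0a00	/* hypothetical base of the PHY range */
#define EXAMPLE_PHY_MASK 0x00ff	/* low byte selects the individual PHY */

static int example_matches_phy_row(unsigned int type)
{
	return (type & ~EXAMPLE_PHY_MASK) == EXAMPLE_PHY_MIN;
}

int main(void)
{
	printf("%d %d %d\n",
	       example_matches_phy_row(0x0a00),	/* 1: first PHY partition */
	       example_matches_phy_row(0x0a3f),	/* 1: still inside the range */
	       example_matches_phy_row(0x0b00));	/* 0: outside the range */
	return 0;
}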
| 5884 | |
| 5885 | static int efx_ef10_mtd_probe(struct efx_nic *efx) |
| 5886 | { |
| 5887 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); |
| 5888 | struct efx_mcdi_mtd_partition *parts; |
| 5889 | size_t outlen, n_parts_total, i, n_parts; |
| 5890 | unsigned int type; |
| 5891 | int rc; |
| 5892 | |
| 5893 | ASSERT_RTNL(); |
| 5894 | |
| 5895 | BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); |
| 5896 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, |
| 5897 | outbuf, sizeof(outbuf), &outlen); |
| 5898 | if (rc) |
| 5899 | return rc; |
| 5900 | if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) |
| 5901 | return -EIO; |
| 5902 | |
| 5903 | n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); |
| 5904 | if (n_parts_total > |
| 5905 | MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) |
| 5906 | return -EIO; |
| 5907 | |
| 5908 | parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); |
| 5909 | if (!parts) |
| 5910 | return -ENOMEM; |
| 5911 | |
| 5912 | n_parts = 0; |
| 5913 | for (i = 0; i < n_parts_total; i++) { |
| 5914 | type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, |
| 5915 | i); |
| 5916 | rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); |
| 5917 | if (rc == 0) |
| 5918 | n_parts++; |
| 5919 | else if (rc != -ENODEV) |
| 5920 | goto fail; |
| 5921 | } |
| 5922 | |
| 5923 | rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); |
| 5924 | fail: |
| 5925 | if (rc) |
| 5926 | kfree(parts); |
| 5927 | return rc; |
| 5928 | } |
| 5929 | |
| 5930 | #endif /* CONFIG_SFC_MTD */ |
| 5931 | |
| 5932 | static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) |
| 5933 | { |
| 5934 | _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); |
| 5935 | } |
| 5936 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 5937 | static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, |
| 5938 | u32 host_time) {} |
| 5939 | |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 5940 | static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, |
| 5941 | bool temp) |
| 5942 | { |
| 5943 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); |
| 5944 | int rc; |
| 5945 | |
| 5946 | if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || |
| 5947 | channel->sync_events_state == SYNC_EVENTS_VALID || |
| 5948 | (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) |
| 5949 | return 0; |
| 5950 | channel->sync_events_state = SYNC_EVENTS_REQUESTED; |
| 5951 | |
| 5952 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); |
| 5953 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); |
| 5954 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, |
| 5955 | channel->channel); |
| 5956 | |
| 5957 | rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, |
| 5958 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 5959 | |
| 5960 | if (rc != 0) |
| 5961 | channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : |
| 5962 | SYNC_EVENTS_DISABLED; |
| 5963 | |
| 5964 | return rc; |
| 5965 | } |
| 5966 | |
| 5967 | static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, |
| 5968 | bool temp) |
| 5969 | { |
| 5970 | MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); |
| 5971 | int rc; |
| 5972 | |
| 5973 | if (channel->sync_events_state == SYNC_EVENTS_DISABLED || |
| 5974 | (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) |
| 5975 | return 0; |
| 5976 | if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { |
| 5977 | channel->sync_events_state = SYNC_EVENTS_DISABLED; |
| 5978 | return 0; |
| 5979 | } |
| 5980 | channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : |
| 5981 | SYNC_EVENTS_DISABLED; |
| 5982 | |
| 5983 | MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); |
| 5984 | MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); |
| 5985 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, |
| 5986 | MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); |
| 5987 | MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, |
| 5988 | channel->channel); |
| 5989 | |
| 5990 | rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, |
| 5991 | inbuf, sizeof(inbuf), NULL, 0, NULL); |
| 5992 | |
| 5993 | return rc; |
| 5994 | } |
| 5995 | |
| 5996 | static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, |
| 5997 | bool temp) |
| 5998 | { |
| 5999 | int (*set)(struct efx_channel *channel, bool temp); |
| 6000 | struct efx_channel *channel; |
| 6001 | |
| 6002 | set = en ? |
| 6003 | efx_ef10_rx_enable_timestamping : |
| 6004 | efx_ef10_rx_disable_timestamping; |
| 6005 | |
| 6006 | efx_for_each_channel(channel, efx) { |
| 6007 | int rc = set(channel, temp); |
| 6008 | if (en && rc != 0) { |
| 6009 | efx_ef10_ptp_set_ts_sync_events(efx, false, temp); |
| 6010 | return rc; |
| 6011 | } |
| 6012 | } |
| 6013 | |
| 6014 | return 0; |
| 6015 | } |
| 6016 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6017 | static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, |
| 6018 | struct hwtstamp_config *init) |
| 6019 | { |
| 6020 | return -EOPNOTSUPP; |
| 6021 | } |
| 6022 | |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 6023 | static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, |
| 6024 | struct hwtstamp_config *init) |
| 6025 | { |
| 6026 | int rc; |
| 6027 | |
| 6028 | switch (init->rx_filter) { |
| 6029 | case HWTSTAMP_FILTER_NONE: |
| 6030 | efx_ef10_ptp_set_ts_sync_events(efx, false, false); |
| 6031 | /* if TX timestamping is still requested then leave PTP on */ |
| 6032 | return efx_ptp_change_mode(efx, |
| 6033 | init->tx_type != HWTSTAMP_TX_OFF, 0); |
| 6034 | case HWTSTAMP_FILTER_ALL: |
| 6035 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
| 6036 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
| 6037 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
| 6038 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: |
| 6039 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
| 6040 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: |
| 6041 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: |
| 6042 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: |
| 6043 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: |
| 6044 | case HWTSTAMP_FILTER_PTP_V2_EVENT: |
| 6045 | case HWTSTAMP_FILTER_PTP_V2_SYNC: |
| 6046 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
| 6047 | init->rx_filter = HWTSTAMP_FILTER_ALL; |
| 6048 | rc = efx_ptp_change_mode(efx, true, 0); |
| 6049 | if (!rc) |
| 6050 | rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); |
| 6051 | if (rc) |
| 6052 | efx_ptp_change_mode(efx, false, 0); |
| 6053 | return rc; |
| 6054 | default: |
| 6055 | return -ERANGE; |
| 6056 | } |
| 6057 | } |
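
/* A small userspace sketch of the request that reaches the handler above via
 * the standard SIOCSHWTSTAMP ioctl.  The interface name is an assumption for
 * the example; note how a PTP-specific rx_filter comes back as
 * HWTSTAMP_FILTER_ALL, since EF10 timestamps all received traffic once PTP
 * mode is enabled.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_request_rx_timestamps(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd, rc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	rc = ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* requires CAP_NET_ADMIN */
	if (rc == 0)
		printf("granted rx_filter %d (HWTSTAMP_FILTER_ALL on EF10)\n",
		       cfg.rx_filter);
	close(fd);
	return rc;
}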
| 6058 | |
Bert Kenward | 08a7b29b | 2017-01-10 16:23:33 +0000 | [diff] [blame] | 6059 | static int efx_ef10_get_phys_port_id(struct efx_nic *efx, |
| 6060 | struct netdev_phys_item_id *ppid) |
| 6061 | { |
| 6062 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 6063 | |
| 6064 | if (!is_valid_ether_addr(nic_data->port_id)) |
| 6065 | return -EOPNOTSUPP; |
| 6066 | |
| 6067 | ppid->id_len = ETH_ALEN; |
| 6068 | memcpy(ppid->id, nic_data->port_id, ppid->id_len); |
| 6069 | |
| 6070 | return 0; |
| 6071 | } |
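
/* The bytes copied into ppid above surface in userspace as the netdev's
 * phys_port_id, e.g. via "ip -d link show" or sysfs.  A minimal sketch,
 * assuming an interface name of "ens1f0":
 */
#include <stdio.h>

static int example_print_phys_port_id(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/net/ens1f0/phys_port_id", "r");

	if (!f)
		return -1;	/* absent if the driver returned -EOPNOTSUPP */
	if (fgets(buf, sizeof(buf), f))
		printf("phys_port_id: %s", buf);
	fclose(f);
	return 0;
}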
| 6072 | |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 6073 | static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) |
| 6074 | { |
| 6075 | if (proto != htons(ETH_P_8021Q)) |
| 6076 | return -EINVAL; |
| 6077 | |
| 6078 | return efx_ef10_add_vlan(efx, vid); |
| 6079 | } |
| 6080 | |
| 6081 | static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) |
| 6082 | { |
| 6083 | if (proto != htons(ETH_P_8021Q)) |
| 6084 | return -EINVAL; |
| 6085 | |
| 6086 | return efx_ef10_del_vlan(efx, vid); |
| 6087 | } |
| 6088 | |
Jon Cooper | e5fbd97 | 2017-02-08 16:52:10 +0000 | [diff] [blame^] | 6089 | /* We rely on the MCDI wiping out our TX rings if it made any changes to the |
| 6090 | * ports table, ensuring that any TSO descriptors that were made on a now- |
| 6091 | * removed tunnel port will be blown away and won't break things when we try |
| 6092 | * to transmit them using the new ports table. |
| 6093 | */ |
| 6094 | static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) |
| 6095 | { |
| 6096 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 6097 | MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); |
| 6098 | MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); |
| 6099 | bool will_reset = false; |
| 6100 | size_t num_entries = 0; |
| 6101 | size_t inlen, outlen; |
| 6102 | size_t i; |
| 6103 | int rc; |
| 6104 | efx_dword_t flags_and_num_entries; |
| 6105 | |
| 6106 | WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); |
| 6107 | |
| 6108 | nic_data->udp_tunnels_dirty = false; |
| 6109 | |
| 6110 | if (!(nic_data->datapath_caps & |
| 6111 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { |
| 6112 | netif_device_attach(efx->net_dev); |
| 6113 | return 0; |
| 6114 | } |
| 6115 | |
| 6116 | BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > |
| 6117 | MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); |
| 6118 | |
| 6119 | for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { |
| 6120 | if (nic_data->udp_tunnels[i].count && |
| 6121 | nic_data->udp_tunnels[i].port) { |
| 6122 | efx_dword_t entry; |
| 6123 | |
| 6124 | EFX_POPULATE_DWORD_2(entry, |
| 6125 | TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, |
| 6126 | ntohs(nic_data->udp_tunnels[i].port), |
| 6127 | TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, |
| 6128 | nic_data->udp_tunnels[i].type); |
| 6129 | *_MCDI_ARRAY_DWORD(inbuf, |
| 6130 | SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, |
| 6131 | num_entries++) = entry; |
| 6132 | } |
| 6133 | } |
| 6134 | |
| 6135 | BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - |
| 6136 | MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != |
| 6137 | EFX_WORD_1_LBN); |
| 6138 | BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != |
| 6139 | EFX_WORD_1_WIDTH); |
| 6140 | EFX_POPULATE_DWORD_2(flags_and_num_entries, |
| 6141 | MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, |
| 6142 | !!unloading, |
| 6143 | EFX_WORD_1, num_entries); |
| 6144 | *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = |
| 6145 | flags_and_num_entries; |
| 6146 | |
| 6147 | inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); |
| 6148 | |
| 6149 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, |
| 6150 | inbuf, inlen, outbuf, sizeof(outbuf), &outlen); |
| 6151 | if (rc == -EIO) { |
| 6152 | /* Most likely the MC rebooted due to another function also |
| 6153 | * setting its tunnel port list. Mark the tunnel port list as |
| 6154 | * dirty, so it will be pushed upon coming up from the reboot. |
| 6155 | */ |
| 6156 | nic_data->udp_tunnels_dirty = true; |
| 6157 | return 0; |
| 6158 | } |
| 6159 | |
| 6160 | if (rc) { |
| 6161 | /* failing with -EPERM is expected on unprivileged functions */ |
| 6162 | if (rc != -EPERM) |
| 6163 | netif_warn(efx, drv, efx->net_dev, |
| 6164 | "Unable to set UDP tunnel ports; rc=%d.\n", rc); |
| 6165 | } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & |
| 6166 | (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { |
| 6167 | netif_info(efx, drv, efx->net_dev, |
| 6168 | "Rebooting MC due to UDP tunnel port list change\n"); |
| 6169 | will_reset = true; |
| 6170 | if (unloading) |
| 6171 | /* Delay for the MC reset to complete. This will make |
| 6172 | * unloading other functions a bit smoother. This is a |
| 6173 | * race, but the other unload will work whichever way |
| 6174 | * it goes; this just avoids an unnecessary error |
| 6175 | * message. |
| 6176 | */ |
| 6177 | msleep(100); |
| 6178 | } |
| 6179 | if (!will_reset && !unloading) { |
| 6180 | /* The caller will have detached, relying on the MC reset to |
| 6181 | * trigger a re-attach. Since there won't be an MC reset, we |
| 6182 | * have to do the attach ourselves. |
| 6183 | */ |
| 6184 | netif_device_attach(efx->net_dev); |
| 6185 | } |
| 6186 | |
| 6187 | return rc; |
| 6188 | } |
| 6189 | |
| 6190 | static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) |
| 6191 | { |
| 6192 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 6193 | int rc = 0; |
| 6194 | |
| 6195 | mutex_lock(&nic_data->udp_tunnels_lock); |
| 6196 | if (nic_data->udp_tunnels_dirty) { |
| 6197 | /* Make sure all TX are stopped while we modify the table, else |
| 6198 | * we might race against an efx_features_check(). |
| 6199 | */ |
| 6200 | efx_device_detach_sync(efx); |
| 6201 | rc = efx_ef10_set_udp_tnl_ports(efx, false); |
| 6202 | } |
| 6203 | mutex_unlock(&nic_data->udp_tunnels_lock); |
| 6204 | return rc; |
| 6205 | } |
| 6206 | |
| 6207 | static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, |
| 6208 | __be16 port) |
| 6209 | { |
| 6210 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 6211 | size_t i; |
| 6212 | |
| 6213 | for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { |
| 6214 | if (!nic_data->udp_tunnels[i].count) |
| 6215 | continue; |
| 6216 | if (nic_data->udp_tunnels[i].port == port) |
| 6217 | return &nic_data->udp_tunnels[i]; |
| 6218 | } |
| 6219 | return NULL; |
| 6220 | } |
| 6221 | |
| 6222 | static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, |
| 6223 | struct efx_udp_tunnel tnl) |
| 6224 | { |
| 6225 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 6226 | struct efx_udp_tunnel *match; |
| 6227 | char typebuf[8]; |
| 6228 | size_t i; |
| 6229 | int rc; |
| 6230 | |
| 6231 | if (!(nic_data->datapath_caps & |
| 6232 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) |
| 6233 | return 0; |
| 6234 | |
| 6235 | efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); |
| 6236 | netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", |
| 6237 | typebuf, ntohs(tnl.port)); |
| 6238 | |
| 6239 | mutex_lock(&nic_data->udp_tunnels_lock); |
| 6240 | /* Make sure all TX are stopped while we add to the table, else we |
| 6241 | * might race against an efx_features_check(). |
| 6242 | */ |
| 6243 | efx_device_detach_sync(efx); |
| 6244 | |
| 6245 | match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); |
| 6246 | if (match != NULL) { |
| 6247 | if (match->type == tnl.type) { |
| 6248 | netif_dbg(efx, drv, efx->net_dev, |
| 6249 | "Referencing existing tunnel entry\n"); |
| 6250 | match->count++; |
| 6251 | /* No need to cause an MCDI update */ |
| 6252 | rc = 0; |
| 6253 | goto unlock_out; |
| 6254 | } |
| 6255 | efx_get_udp_tunnel_type_name(match->type, |
| 6256 | typebuf, sizeof(typebuf)); |
| 6257 | netif_dbg(efx, drv, efx->net_dev, |
| 6258 | "UDP port %d is already in use by %s\n", |
| 6259 | ntohs(tnl.port), typebuf); |
| 6260 | rc = -EEXIST; |
| 6261 | goto unlock_out; |
| 6262 | } |
| 6263 | |
| 6264 | for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) |
| 6265 | if (!nic_data->udp_tunnels[i].count) { |
| 6266 | nic_data->udp_tunnels[i] = tnl; |
| 6267 | nic_data->udp_tunnels[i].count = 1; |
| 6268 | rc = efx_ef10_set_udp_tnl_ports(efx, false); |
| 6269 | goto unlock_out; |
| 6270 | } |
| 6271 | |
| 6272 | netif_dbg(efx, drv, efx->net_dev, |
| 6273 | "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", |
| 6274 | typebuf, ntohs(tnl.port)); |
| 6275 | |
| 6276 | rc = -ENOMEM; |
| 6277 | |
| 6278 | unlock_out: |
| 6279 | mutex_unlock(&nic_data->udp_tunnels_lock); |
| 6280 | return rc; |
| 6281 | } |
| 6282 | |
| 6283 | /* Called under the TX lock with the TX queue running, hence no-one can be |
| 6284 | * in the middle of updating the UDP tunnels table. However, they could |
| 6285 | * have tried and failed the MCDI, in which case they'll have set the dirty |
| 6286 | * flag before dropping their locks. |
| 6287 | */ |
| 6288 | static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) |
| 6289 | { |
| 6290 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 6291 | |
| 6292 | if (!(nic_data->datapath_caps & |
| 6293 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) |
| 6294 | return false; |
| 6295 | |
| 6296 | if (nic_data->udp_tunnels_dirty) |
| 6297 | /* SW table may not match HW state, so just assume we can't |
| 6298 | * use any UDP tunnel offloads. |
| 6299 | */ |
| 6300 | return false; |
| 6301 | |
| 6302 | return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; |
| 6303 | } |
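
/* Sketch of how a TX-path caller (such as the driver's features_check hook)
 * might consult the lookup above before offering segmentation/checksum
 * offloads on an encapsulated skb.  This is a simplified assumption for
 * illustration, not the driver's actual implementation.
 */
static bool example_can_offload_encap(struct efx_nic *efx, struct sk_buff *skb)
{
	if (!skb->encapsulation)
		return true;	/* not tunnelled; no UDP tunnel entry needed */
	/* Outer destination port must match a tunnel the NIC knows about */
	return efx_ef10_udp_tnl_has_port(efx, udp_hdr(skb)->dest);
}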
| 6304 | |
| 6305 | static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, |
| 6306 | struct efx_udp_tunnel tnl) |
| 6307 | { |
| 6308 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 6309 | struct efx_udp_tunnel *match; |
| 6310 | char typebuf[8]; |
| 6311 | int rc; |
| 6312 | |
| 6313 | if (!(nic_data->datapath_caps & |
| 6314 | (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) |
| 6315 | return 0; |
| 6316 | |
| 6317 | efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); |
| 6318 | netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", |
| 6319 | typebuf, ntohs(tnl.port)); |
| 6320 | |
| 6321 | mutex_lock(&nic_data->udp_tunnels_lock); |
| 6322 | /* Make sure all TX are stopped while we remove from the table, else we |
| 6323 | * might race against an efx_features_check(). |
| 6324 | */ |
| 6325 | efx_device_detach_sync(efx); |
| 6326 | |
| 6327 | match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); |
| 6328 | if (match != NULL) { |
| 6329 | if (match->type == tnl.type) { |
| 6330 | if (--match->count) { |
| 6331 | /* Port is still in use, so nothing to do */ |
| 6332 | netif_dbg(efx, drv, efx->net_dev, |
| 6333 | "UDP tunnel port %d remains active\n", |
| 6334 | ntohs(tnl.port)); |
| 6335 | rc = 0; |
| 6336 | goto out_unlock; |
| 6337 | } |
| 6338 | rc = efx_ef10_set_udp_tnl_ports(efx, false); |
| 6339 | goto out_unlock; |
| 6340 | } |
| 6341 | efx_get_udp_tunnel_type_name(match->type, |
| 6342 | typebuf, sizeof(typebuf)); |
| 6343 | netif_warn(efx, drv, efx->net_dev, |
| 6344 | "UDP port %d is actually in use by %s, not removing\n", |
| 6345 | ntohs(tnl.port), typebuf); |
| 6346 | } |
| 6347 | rc = -ENOENT; |
| 6348 | |
| 6349 | out_unlock: |
| 6350 | mutex_unlock(&nic_data->udp_tunnels_lock); |
| 6351 | return rc; |
| 6352 | } |
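
/* Illustrative caller sequence showing the reference counting implemented by
 * the add/del handlers above.  VXLAN's IANA port (4789) is used for the
 * example, and TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN is assumed to be the MCDI
 * protocol value for a VXLAN entry (see mcdi_pcol.h).
 */
static int example_udp_tnl_refcounting(struct efx_nic *efx)
{
	struct efx_udp_tunnel tnl = {
		.type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN,
		.port = htons(4789),
	};
	int rc;

	rc = efx_ef10_udp_tnl_add_port(efx, tnl);	/* new entry, count = 1, MCDI pushed */
	if (rc)
		return rc;
	rc = efx_ef10_udp_tnl_add_port(efx, tnl);	/* same entry, count = 2, no MCDI */
	if (rc)
		return rc;
	rc = efx_ef10_udp_tnl_del_port(efx, tnl);	/* count drops to 1, entry kept */
	if (rc)
		return rc;
	return efx_ef10_udp_tnl_del_port(efx, tnl);	/* count hits 0, MCDI pushed without the port */
}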
| 6353 | |
Andrew Rybchenko | 100a9db | 2016-06-15 17:42:26 +0100 | [diff] [blame] | 6354 | #define EF10_OFFLOAD_FEATURES \ |
| 6355 | (NETIF_F_IP_CSUM | \ |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 6356 | NETIF_F_HW_VLAN_CTAG_FILTER | \ |
Andrew Rybchenko | 100a9db | 2016-06-15 17:42:26 +0100 | [diff] [blame] | 6357 | NETIF_F_IPV6_CSUM | \ |
| 6358 | NETIF_F_RXHASH | \ |
| 6359 | NETIF_F_NTUPLE) |
| 6360 | |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6361 | const struct efx_nic_type efx_hunt_a0_vf_nic_type = { |
Shradha Shah | 6f7f8aa | 2015-05-06 01:00:07 +0100 | [diff] [blame] | 6362 | .is_vf = true, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6363 | .mem_bar = EFX_MEM_VF_BAR, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6364 | .mem_map_size = efx_ef10_mem_map_size, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6365 | .probe = efx_ef10_probe_vf, |
| 6366 | .remove = efx_ef10_remove, |
| 6367 | .dimension_resources = efx_ef10_dimension_resources, |
| 6368 | .init = efx_ef10_init_nic, |
| 6369 | .fini = efx_port_dummy_op_void, |
Jon Cooper | 087e902 | 2015-05-20 11:11:35 +0100 | [diff] [blame] | 6370 | .map_reset_reason = efx_ef10_map_reset_reason, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6371 | .map_reset_flags = efx_ef10_map_reset_flags, |
| 6372 | .reset = efx_ef10_reset, |
| 6373 | .probe_port = efx_mcdi_port_probe, |
| 6374 | .remove_port = efx_mcdi_port_remove, |
| 6375 | .fini_dmaq = efx_ef10_fini_dmaq, |
| 6376 | .prepare_flr = efx_ef10_prepare_flr, |
| 6377 | .finish_flr = efx_port_dummy_op_void, |
| 6378 | .describe_stats = efx_ef10_describe_stats, |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 6379 | .update_stats = efx_ef10_update_stats_vf, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6380 | .start_stats = efx_port_dummy_op_void, |
| 6381 | .pull_stats = efx_port_dummy_op_void, |
| 6382 | .stop_stats = efx_port_dummy_op_void, |
| 6383 | .set_id_led = efx_mcdi_set_id_led, |
| 6384 | .push_irq_moderation = efx_ef10_push_irq_moderation, |
Shradha Shah | 862f894 | 2015-05-20 11:08:56 +0100 | [diff] [blame] | 6385 | .reconfigure_mac = efx_ef10_mac_reconfigure_vf, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6386 | .check_mac_fault = efx_mcdi_mac_check_fault, |
| 6387 | .reconfigure_port = efx_mcdi_port_reconfigure, |
| 6388 | .get_wol = efx_ef10_get_wol_vf, |
| 6389 | .set_wol = efx_ef10_set_wol_vf, |
| 6390 | .resume_wol = efx_port_dummy_op_void, |
| 6391 | .mcdi_request = efx_ef10_mcdi_request, |
| 6392 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, |
| 6393 | .mcdi_read_response = efx_ef10_mcdi_read_response, |
| 6394 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 6395 | .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6396 | .irq_enable_master = efx_port_dummy_op_void, |
| 6397 | .irq_test_generate = efx_ef10_irq_test_generate, |
| 6398 | .irq_disable_non_ev = efx_port_dummy_op_void, |
| 6399 | .irq_handle_msi = efx_ef10_msi_interrupt, |
| 6400 | .irq_handle_legacy = efx_ef10_legacy_interrupt, |
| 6401 | .tx_probe = efx_ef10_tx_probe, |
| 6402 | .tx_init = efx_ef10_tx_init, |
| 6403 | .tx_remove = efx_ef10_tx_remove, |
| 6404 | .tx_write = efx_ef10_tx_write, |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 6405 | .tx_limit_len = efx_ef10_tx_limit_len, |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 6406 | .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, |
Edward Cree | a707d18 | 2017-01-17 12:02:12 +0000 | [diff] [blame] | 6407 | .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6408 | .rx_probe = efx_ef10_rx_probe, |
| 6409 | .rx_init = efx_ef10_rx_init, |
| 6410 | .rx_remove = efx_ef10_rx_remove, |
| 6411 | .rx_write = efx_ef10_rx_write, |
| 6412 | .rx_defer_refill = efx_ef10_rx_defer_refill, |
| 6413 | .ev_probe = efx_ef10_ev_probe, |
| 6414 | .ev_init = efx_ef10_ev_init, |
| 6415 | .ev_fini = efx_ef10_ev_fini, |
| 6416 | .ev_remove = efx_ef10_ev_remove, |
| 6417 | .ev_process = efx_ef10_ev_process, |
| 6418 | .ev_read_ack = efx_ef10_ev_read_ack, |
| 6419 | .ev_test_generate = efx_ef10_ev_test_generate, |
| 6420 | .filter_table_probe = efx_ef10_filter_table_probe, |
| 6421 | .filter_table_restore = efx_ef10_filter_table_restore, |
| 6422 | .filter_table_remove = efx_ef10_filter_table_remove, |
| 6423 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, |
| 6424 | .filter_insert = efx_ef10_filter_insert, |
| 6425 | .filter_remove_safe = efx_ef10_filter_remove_safe, |
| 6426 | .filter_get_safe = efx_ef10_filter_get_safe, |
| 6427 | .filter_clear_rx = efx_ef10_filter_clear_rx, |
| 6428 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, |
| 6429 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, |
| 6430 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, |
| 6431 | #ifdef CONFIG_RFS_ACCEL |
| 6432 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, |
| 6433 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, |
| 6434 | #endif |
| 6435 | #ifdef CONFIG_SFC_MTD |
| 6436 | .mtd_probe = efx_port_dummy_op_int, |
| 6437 | #endif |
| 6438 | .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, |
| 6439 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 6440 | .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, |
| 6441 | .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6442 | #ifdef CONFIG_SFC_SRIOV |
Shradha Shah | 7b8c7b5 | 2015-05-06 00:58:54 +0100 | [diff] [blame] | 6443 | .vswitching_probe = efx_ef10_vswitching_probe_vf, |
| 6444 | .vswitching_restore = efx_ef10_vswitching_restore_vf, |
| 6445 | .vswitching_remove = efx_ef10_vswitching_remove_vf, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6446 | #endif |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 6447 | .get_mac_address = efx_ef10_get_mac_address_vf, |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 6448 | .set_mac_address = efx_ef10_set_mac_address, |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 6449 | |
Bert Kenward | 08a7b29b | 2017-01-10 16:23:33 +0000 | [diff] [blame] | 6450 | .get_phys_port_id = efx_ef10_get_phys_port_id, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6451 | .revision = EFX_REV_HUNT_A0, |
| 6452 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), |
| 6453 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, |
| 6454 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, |
| 6455 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, |
| 6456 | .can_rx_scatter = true, |
| 6457 | .always_rx_scatter = true, |
| 6458 | .max_interrupt_mode = EFX_INT_MODE_MSIX, |
| 6459 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, |
Andrew Rybchenko | 100a9db | 2016-06-15 17:42:26 +0100 | [diff] [blame] | 6460 | .offload_features = EF10_OFFLOAD_FEATURES, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6461 | .mcdi_max_ver = 2, |
| 6462 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, |
| 6463 | .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | |
| 6464 | 1 << HWTSTAMP_FILTER_ALL, |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 6465 | .rx_hash_key_size = 40, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6466 | }; |
| 6467 | |
| 6468 | const struct efx_nic_type efx_hunt_a0_nic_type = { |
Shradha Shah | 6f7f8aa | 2015-05-06 01:00:07 +0100 | [diff] [blame] | 6469 | .is_vf = false, |
Shradha Shah | 02246a7 | 2015-05-06 00:58:14 +0100 | [diff] [blame] | 6470 | .mem_bar = EFX_MEM_BAR, |
| 6471 | .mem_map_size = efx_ef10_mem_map_size, |
| 6472 | .probe = efx_ef10_probe_pf, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6473 | .remove = efx_ef10_remove, |
| 6474 | .dimension_resources = efx_ef10_dimension_resources, |
| 6475 | .init = efx_ef10_init_nic, |
| 6476 | .fini = efx_port_dummy_op_void, |
Jon Cooper | 087e902 | 2015-05-20 11:11:35 +0100 | [diff] [blame] | 6477 | .map_reset_reason = efx_ef10_map_reset_reason, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6478 | .map_reset_flags = efx_ef10_map_reset_flags, |
Jon Cooper | 3e33626 | 2014-01-17 19:48:06 +0000 | [diff] [blame] | 6479 | .reset = efx_ef10_reset, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6480 | .probe_port = efx_mcdi_port_probe, |
| 6481 | .remove_port = efx_mcdi_port_remove, |
| 6482 | .fini_dmaq = efx_ef10_fini_dmaq, |
Edward Cree | e283546 | 2014-04-16 19:27:48 +0100 | [diff] [blame] | 6483 | .prepare_flr = efx_ef10_prepare_flr, |
| 6484 | .finish_flr = efx_port_dummy_op_void, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6485 | .describe_stats = efx_ef10_describe_stats, |
Daniel Pieczko | d778819 | 2015-06-02 11:39:20 +0100 | [diff] [blame] | 6486 | .update_stats = efx_ef10_update_stats_pf, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6487 | .start_stats = efx_mcdi_mac_start_stats, |
Jon Cooper | f8f3b5a | 2013-09-30 17:36:50 +0100 | [diff] [blame] | 6488 | .pull_stats = efx_mcdi_mac_pull_stats, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6489 | .stop_stats = efx_mcdi_mac_stop_stats, |
| 6490 | .set_id_led = efx_mcdi_set_id_led, |
| 6491 | .push_irq_moderation = efx_ef10_push_irq_moderation, |
| 6492 | .reconfigure_mac = efx_ef10_mac_reconfigure, |
| 6493 | .check_mac_fault = efx_mcdi_mac_check_fault, |
| 6494 | .reconfigure_port = efx_mcdi_port_reconfigure, |
| 6495 | .get_wol = efx_ef10_get_wol, |
| 6496 | .set_wol = efx_ef10_set_wol, |
| 6497 | .resume_wol = efx_port_dummy_op_void, |
Jon Cooper | 74cd60a | 2013-09-16 14:18:51 +0100 | [diff] [blame] | 6498 | .test_chip = efx_ef10_test_chip, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6499 | .test_nvram = efx_mcdi_nvram_test_all, |
| 6500 | .mcdi_request = efx_ef10_mcdi_request, |
| 6501 | .mcdi_poll_response = efx_ef10_mcdi_poll_response, |
| 6502 | .mcdi_read_response = efx_ef10_mcdi_read_response, |
| 6503 | .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, |
Daniel Pieczko | c577e59 | 2015-10-09 10:40:35 +0100 | [diff] [blame] | 6504 | .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6505 | .irq_enable_master = efx_port_dummy_op_void, |
| 6506 | .irq_test_generate = efx_ef10_irq_test_generate, |
| 6507 | .irq_disable_non_ev = efx_port_dummy_op_void, |
| 6508 | .irq_handle_msi = efx_ef10_msi_interrupt, |
| 6509 | .irq_handle_legacy = efx_ef10_legacy_interrupt, |
| 6510 | .tx_probe = efx_ef10_tx_probe, |
| 6511 | .tx_init = efx_ef10_tx_init, |
| 6512 | .tx_remove = efx_ef10_tx_remove, |
| 6513 | .tx_write = efx_ef10_tx_write, |
Bert Kenward | e9117e5 | 2016-11-17 10:51:54 +0000 | [diff] [blame] | 6514 | .tx_limit_len = efx_ef10_tx_limit_len, |
Jon Cooper | 267c015 | 2015-05-06 00:59:38 +0100 | [diff] [blame] | 6515 | .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, |
Edward Cree | a707d18 | 2017-01-17 12:02:12 +0000 | [diff] [blame] | 6516 | .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6517 | .rx_probe = efx_ef10_rx_probe, |
| 6518 | .rx_init = efx_ef10_rx_init, |
| 6519 | .rx_remove = efx_ef10_rx_remove, |
| 6520 | .rx_write = efx_ef10_rx_write, |
| 6521 | .rx_defer_refill = efx_ef10_rx_defer_refill, |
| 6522 | .ev_probe = efx_ef10_ev_probe, |
| 6523 | .ev_init = efx_ef10_ev_init, |
| 6524 | .ev_fini = efx_ef10_ev_fini, |
| 6525 | .ev_remove = efx_ef10_ev_remove, |
| 6526 | .ev_process = efx_ef10_ev_process, |
| 6527 | .ev_read_ack = efx_ef10_ev_read_ack, |
| 6528 | .ev_test_generate = efx_ef10_ev_test_generate, |
| 6529 | .filter_table_probe = efx_ef10_filter_table_probe, |
| 6530 | .filter_table_restore = efx_ef10_filter_table_restore, |
| 6531 | .filter_table_remove = efx_ef10_filter_table_remove, |
| 6532 | .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, |
| 6533 | .filter_insert = efx_ef10_filter_insert, |
| 6534 | .filter_remove_safe = efx_ef10_filter_remove_safe, |
| 6535 | .filter_get_safe = efx_ef10_filter_get_safe, |
| 6536 | .filter_clear_rx = efx_ef10_filter_clear_rx, |
| 6537 | .filter_count_rx_used = efx_ef10_filter_count_rx_used, |
| 6538 | .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, |
| 6539 | .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, |
| 6540 | #ifdef CONFIG_RFS_ACCEL |
| 6541 | .filter_rfs_insert = efx_ef10_filter_rfs_insert, |
| 6542 | .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, |
| 6543 | #endif |
| 6544 | #ifdef CONFIG_SFC_MTD |
| 6545 | .mtd_probe = efx_ef10_mtd_probe, |
| 6546 | .mtd_rename = efx_mcdi_mtd_rename, |
| 6547 | .mtd_read = efx_mcdi_mtd_read, |
| 6548 | .mtd_erase = efx_mcdi_mtd_erase, |
| 6549 | .mtd_write = efx_mcdi_mtd_write, |
| 6550 | .mtd_sync = efx_mcdi_mtd_sync, |
| 6551 | #endif |
| 6552 | .ptp_write_host_time = efx_ef10_ptp_write_host_time, |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 6553 | .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, |
| 6554 | .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, |
Andrew Rybchenko | 4a53ea8 | 2016-06-15 17:48:32 +0100 | [diff] [blame] | 6555 | .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, |
| 6556 | .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, |
Jon Cooper | e5fbd97 | 2017-02-08 16:52:10 +0000 | [diff] [blame^] | 6557 | .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, |
| 6558 | .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, |
| 6559 | .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, |
| 6560 | .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, |
Shradha Shah | 7fa8d54 | 2015-05-06 00:55:13 +0100 | [diff] [blame] | 6561 | #ifdef CONFIG_SFC_SRIOV |
Shradha Shah | 834e23d | 2015-05-06 00:55:58 +0100 | [diff] [blame] | 6562 | .sriov_configure = efx_ef10_sriov_configure, |
Shradha Shah | d98a4ff | 2014-11-05 12:16:46 +0000 | [diff] [blame] | 6563 | .sriov_init = efx_ef10_sriov_init, |
| 6564 | .sriov_fini = efx_ef10_sriov_fini, |
Shradha Shah | d98a4ff | 2014-11-05 12:16:46 +0000 | [diff] [blame] | 6565 | .sriov_wanted = efx_ef10_sriov_wanted, |
| 6566 | .sriov_reset = efx_ef10_sriov_reset, |
Shradha Shah | 7fa8d54 | 2015-05-06 00:55:13 +0100 | [diff] [blame] | 6567 | .sriov_flr = efx_ef10_sriov_flr, |
| 6568 | .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, |
| 6569 | .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, |
| 6570 | .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, |
| 6571 | .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, |
Edward Cree | 4392dc6 | 2015-05-20 11:12:13 +0100 | [diff] [blame] | 6572 | .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, |
Shradha Shah | 7b8c7b5 | 2015-05-06 00:58:54 +0100 | [diff] [blame] | 6573 | .vswitching_probe = efx_ef10_vswitching_probe_pf, |
| 6574 | .vswitching_restore = efx_ef10_vswitching_restore_pf, |
| 6575 | .vswitching_remove = efx_ef10_vswitching_remove_pf, |
Shradha Shah | 7fa8d54 | 2015-05-06 00:55:13 +0100 | [diff] [blame] | 6576 | #endif |
Daniel Pieczko | 0d5e0fb | 2015-05-20 11:10:20 +0100 | [diff] [blame] | 6577 | .get_mac_address = efx_ef10_get_mac_address_pf, |
Shradha Shah | 910c878 | 2015-05-20 11:12:48 +0100 | [diff] [blame] | 6578 | .set_mac_address = efx_ef10_set_mac_address, |
Edward Cree | 46d1efd | 2016-11-17 10:52:36 +0000 | [diff] [blame] | 6579 | .tso_versions = efx_ef10_tso_versions, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6580 | |
Bert Kenward | 08a7b29b | 2017-01-10 16:23:33 +0000 | [diff] [blame] | 6581 | .get_phys_port_id = efx_ef10_get_phys_port_id, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6582 | .revision = EFX_REV_HUNT_A0, |
| 6583 | .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), |
| 6584 | .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, |
| 6585 | .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 6586 | .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6587 | .can_rx_scatter = true, |
| 6588 | .always_rx_scatter = true, |
Edward Cree | de1deff | 2017-01-13 21:20:14 +0000 | [diff] [blame] | 6589 | .option_descriptors = true, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6590 | .max_interrupt_mode = EFX_INT_MODE_MSIX, |
| 6591 | .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, |
Andrew Rybchenko | 100a9db | 2016-06-15 17:42:26 +0100 | [diff] [blame] | 6592 | .offload_features = EF10_OFFLOAD_FEATURES, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6593 | .mcdi_max_ver = 2, |
| 6594 | .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, |
Jon Cooper | bd9a265 | 2013-11-18 12:54:41 +0000 | [diff] [blame] | 6595 | .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | |
| 6596 | 1 << HWTSTAMP_FILTER_ALL, |
Edward Cree | f74d199 | 2017-01-17 12:01:53 +0000 | [diff] [blame] | 6597 | .rx_hash_key_size = 40, |
Ben Hutchings | 8127d66 | 2013-08-29 19:19:29 +0100 | [diff] [blame] | 6598 | }; |