/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

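/* EFX_EF10_DRVGEN_EV is the event code the driver writes into events it
 * generates itself; the enum below lists the subtypes carried in such
 * events.
 */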
#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access. When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter. Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

enum efx_ef10_default_filters {
	EFX_EF10_BCAST,
	EFX_EF10_UCDEF,
	EFX_EF10_MCDEF,
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS
};

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_ef10_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;

	struct rw_semaphore lock; /* Protects entries */
	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
/* unused flag	1UL */
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
	bool vlan_filter;
	struct list_head vlan_list;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);

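/* Filter IDs handed out to the stack encode both the row within the
 * hardware filter table (low bits, recovered by the "unsafe" getter below)
 * and the index of the matching rx_match_mcdi_flags entry (high bits):
 * filter_id = pri * HUNT_FILTER_TBL_ROWS * 2 + idx.
 */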
static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
{
	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
}

static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
{
	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
}

static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
{
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
}

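/* The MC signals a valid warm boot count by placing the magic value 0xb007
 * ("boot") in word 1 of ER_DZ_BIU_MC_SFT_STATUS; word 0 then holds the
 * count itself.
 */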
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
	switch (efx->pci_dev->device) {
	case 0x0b03: /* SFC9250 PF */
		return 0;
	default:
		return 2;
	}
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
	return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar(efx);
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
	} else {
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
	}

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
		u8 vi_window_mode = MCDI_BYTE(outbuf,
				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

		switch (vi_window_mode) {
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
			efx->vi_stride = 8192;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
			efx->vi_stride = 16384;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
			efx->vi_stride = 65536;
			break;
		default:
			netif_err(efx, probe, efx->net_dev,
				  "Unrecognised VI window mode %d\n",
				  vi_window_mode);
			return -EIO;
		}
		netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
			  efx->vi_stride);
	} else {
		/* keep default VI stride */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
			  efx->vi_stride);
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		efx->num_mac_stats = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware reports num_mac_stats = %u\n",
			  efx->num_mac_stats);
	} else {
		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report num_mac_stats, assuming %u\n",
			  efx->num_mac_stats);
	}

	return 0;
}

static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
		return;

	nic_data->licensed_features = MCDI_QWORD(outbuf,
					LICENSING_V3_OUT_LICENSED_FEATURES);
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

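/* The MC reports event-timer properties in one of three layouts depending on
 * which firmware workarounds are active: with bug 61265 the quantum and
 * maximum are given directly in ns for the MCDI-driven timer; with bug 35388
 * the quantum is in ns per count and the maximum is a count; otherwise the
 * raw timer-register fields are used, again as ns per count plus a count.
 */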
static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

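		/* rc is the sysclk frequency reported by GET_CLOCK in MHz,
		 * so 1536 cycles take 1536000 / rc ns.
		 */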
		quantum = 1536000 / rc; /* 1536 cycles */
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init. 8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with the VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

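/* Probe an EF10 function: allocate per-NIC data and the MCDI buffer, wait
 * for the MC to finish (re)booting, reset and configure the function, size
 * the VI space, and set up VLAN bookkeeping.
 */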
static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count. In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function. We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx_ef10_read_licensed_features(efx);

	/* We can have one VI for each vi_stride-byte region.
	 * However, until we use TX option descriptors we need two TX queues
	 * per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (efx->vi_stride * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0)) {
		rc = -EIO;
		goto fail5;
	}

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
		efx->net_dev->hw_features |= NETIF_F_RXFCS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic. It is added automatically if the 8021q module is loaded,
	 * but we can't rely on that since the module may not be loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_detach(efx);

	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

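/* Link the allocated PIO buffers both to the VIs in the write-combining
 * mapping and to each TX queue that may use them.
 */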
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		/* Extra channels, even those with TXQs (PTP), do not require
		 * PIO resources.
		 */
		if (!channel->type->want_pio)
			continue;
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue. We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * efx->vi_stride + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	/* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

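/* Query the port flags, vadaptor flags and number of available VLAN tags
 * for a v-port, where the firmware advertises MC_CMD_VADAPTOR_QUERY support.
 */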
int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf)) {
			rc = -EIO;
			return rc;
		}
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe. The VF needs to be re-created. This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis = max(EFX_TXQ_TYPES,
				   efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels,
			  (efx->n_tx_channels + efx->n_extra_tx_channels) *
			  EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel. Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc == -ENOSPC)
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
		else if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K). So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first 4K of the next VI. Then the WC mapping begins with
	 * the remainder of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      efx->vi_stride) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / EFX_TXQ_TYPES;

		efx_ef10_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal.
		 * Permission errors are less important - we've presumably
		 * had the PIO buffer licence removed.
		 */
		if (rc == -EPERM)
			netif_dbg(efx, drv, efx->net_dev,
				  "not permitted to restore PIO buffers\n");
		else if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false,
					   efx->rss_context.rx_indir_table, NULL);

	return 0;
}

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

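/* Translate ethtool reset flags (ETH_RESET_*) into an EF10 reset method:
 * a full MC-level reset maps to RESET_TYPE_WORLD and a port-level reset
 * to RESET_TYPE_ALL.
 */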
static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}

Ben Hutchings8127d662013-08-29 19:19:29 +01001577#define EF10_DMA_STAT(ext_name, mcdi_name) \
1578 [EF10_STAT_ ## ext_name] = \
1579 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1580#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
1581 [EF10_STAT_ ## int_name] = \
1582 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1583#define EF10_OTHER_STAT(ext_name) \
1584 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
Edward Creee4d112e2014-07-15 11:58:12 +01001585#define GENERIC_SW_STAT(ext_name) \
1586 [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
Ben Hutchings8127d662013-08-29 19:19:29 +01001587
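/* Each descriptor below is { name, DMA field width in bits, byte offset
 * of the counter in the DMA'd MAC stats buffer } - the MC_CMD_MAC_*
 * values are 64-bit word indices, hence the factor of 8.  A NULL name
 * marks a stat used internally but not exposed; a zero width marks a
 * stat computed in software rather than DMA'd by the MC.
 */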
1588static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001589 EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
1590 EF10_DMA_STAT(port_tx_packets, TX_PKTS),
1591 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
1592 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
1593 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
1594 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
1595 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
1596 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
1597 EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
1598 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
1599 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
1600 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
1601 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
1602 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
1603 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
1604 EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
1605 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
1606 EF10_OTHER_STAT(port_rx_good_bytes),
1607 EF10_OTHER_STAT(port_rx_bad_bytes),
1608 EF10_DMA_STAT(port_rx_packets, RX_PKTS),
1609 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
1610 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
1611 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
1612 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
1613 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
1614 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
1615 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
1616 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
1617 EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
1618 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
1619 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
1620 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
1621 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
1622 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
1623 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
1624 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
1625 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
1626 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
1627 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
1628 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
1629 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
Edward Creee4d112e2014-07-15 11:58:12 +01001630 GENERIC_SW_STAT(rx_nodesc_trunc),
1631 GENERIC_SW_STAT(rx_noskb_drops),
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001632 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
1633 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
1634 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
1635 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
1636 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
1637 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
1638 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
1639 EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
1640 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
1641 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
1642 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
1643 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001644 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
1645 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
1646 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
1647 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
1648 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
1649 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
1650 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
1651 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
1652 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
1653 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
1654 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
1655 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1656 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
1657 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
1658 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
1659 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
1660 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
1661 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
Edward Creef411b542017-12-21 09:00:36 +00001662 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
1663 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
1664 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
1665 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
1666 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
1667 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
Bert Kenward2c0b6ee2017-12-21 09:00:41 +00001668 EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
1669 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
1670 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
1671 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
1672 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
1673 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
1674 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
1675 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
1676 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
1677 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
1678 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
1679 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
1680 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
1681 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
1682 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
1683 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
1684 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
Ben Hutchings8127d662013-08-29 19:19:29 +01001685};
1686
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001687#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
1688 (1ULL << EF10_STAT_port_tx_packets) | \
1689 (1ULL << EF10_STAT_port_tx_pause) | \
1690 (1ULL << EF10_STAT_port_tx_unicast) | \
1691 (1ULL << EF10_STAT_port_tx_multicast) | \
1692 (1ULL << EF10_STAT_port_tx_broadcast) | \
1693 (1ULL << EF10_STAT_port_rx_bytes) | \
1694 (1ULL << \
1695 EF10_STAT_port_rx_bytes_minus_good_bytes) | \
1696 (1ULL << EF10_STAT_port_rx_good_bytes) | \
1697 (1ULL << EF10_STAT_port_rx_bad_bytes) | \
1698 (1ULL << EF10_STAT_port_rx_packets) | \
1699 (1ULL << EF10_STAT_port_rx_good) | \
1700 (1ULL << EF10_STAT_port_rx_bad) | \
1701 (1ULL << EF10_STAT_port_rx_pause) | \
1702 (1ULL << EF10_STAT_port_rx_control) | \
1703 (1ULL << EF10_STAT_port_rx_unicast) | \
1704 (1ULL << EF10_STAT_port_rx_multicast) | \
1705 (1ULL << EF10_STAT_port_rx_broadcast) | \
1706 (1ULL << EF10_STAT_port_rx_lt64) | \
1707 (1ULL << EF10_STAT_port_rx_64) | \
1708 (1ULL << EF10_STAT_port_rx_65_to_127) | \
1709 (1ULL << EF10_STAT_port_rx_128_to_255) | \
1710 (1ULL << EF10_STAT_port_rx_256_to_511) | \
1711 (1ULL << EF10_STAT_port_rx_512_to_1023) |\
1712 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
1713 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
1714 (1ULL << EF10_STAT_port_rx_gtjumbo) | \
1715 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
1716 (1ULL << EF10_STAT_port_rx_overflow) | \
1717 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
Edward Creee4d112e2014-07-15 11:58:12 +01001718 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
1719 (1ULL << GENERIC_STAT_rx_noskb_drops))
Ben Hutchings8127d662013-08-29 19:19:29 +01001720
Edward Cree69b365c2016-08-26 15:12:41 +01001721/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
1722 * For a 10G/40G switchable port we do not expose these because they might
1723 * not include all the packets they should.
1724 * On 8000 series NICs these statistics are always provided.
Ben Hutchings8127d662013-08-29 19:19:29 +01001725 */
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001726#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
1727 (1ULL << EF10_STAT_port_tx_lt64) | \
1728 (1ULL << EF10_STAT_port_tx_64) | \
1729 (1ULL << EF10_STAT_port_tx_65_to_127) |\
1730 (1ULL << EF10_STAT_port_tx_128_to_255) |\
1731 (1ULL << EF10_STAT_port_tx_256_to_511) |\
1732 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
1733 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
1734 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
Ben Hutchings8127d662013-08-29 19:19:29 +01001735
1736/* These statistics are only provided by the 40G MAC. For a 10G/40G
1737 * switchable port we do expose these because the errors will otherwise
1738 * be silent.
1739 */
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001740#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
1741 (1ULL << EF10_STAT_port_rx_length_error))
Ben Hutchings8127d662013-08-29 19:19:29 +01001742
Edward Cree568d7a02013-09-25 17:32:09 +01001743/* These statistics are only provided if the firmware supports the
1744 * capability PM_AND_RXDP_COUNTERS.
1745 */
1746#define HUNT_PM_AND_RXDP_STAT_MASK ( \
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001747 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
1748 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
1749 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
1750 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
1751 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
1752 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
1753 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
1754 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
1755 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
1756 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
1757 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
1758 (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
Ben Hutchings8127d662013-08-29 19:19:29 +01001759
Edward Creef411b542017-12-21 09:00:36 +00001760/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
1761 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
1762 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1763 * These bits are in the second u64 of the raw mask.
1764 */
1765#define EF10_FEC_STAT_MASK ( \
1766 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
1767 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
1768 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
1769 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
1770 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
1771 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
1772
Bert Kenward2c0b6ee2017-12-21 09:00:41 +00001773/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
1774 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
1775 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1776 * These bits are in the second u64 of the raw mask.
1777 */
1778#define EF10_CTPIO_STAT_MASK ( \
1779 (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \
1780 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
1781 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
1782 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
1783 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
1784 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
1785 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
1786 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
1787 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
1788 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
1789 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
1790 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
1791 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
1792 (1ULL << (EF10_STAT_ctpio_success - 64)) | \
1793 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
1794 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \
1795 (1ULL << (EF10_STAT_ctpio_erase - 64)))
1796
Edward Cree4bae9132013-09-27 18:52:49 +01001797static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01001798{
Edward Cree4bae9132013-09-27 18:52:49 +01001799 u64 raw_mask = HUNT_COMMON_STAT_MASK;
Ben Hutchings8127d662013-08-29 19:19:29 +01001800 u32 port_caps = efx_mcdi_phy_get_caps(efx);
Edward Cree568d7a02013-09-25 17:32:09 +01001801 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01001802
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001803 if (!(efx->mcdi->fn_flags &
1804 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1805 return 0;
1806
Edward Cree69b365c2016-08-26 15:12:41 +01001807 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
Edward Cree4bae9132013-09-27 18:52:49 +01001808 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
Edward Cree69b365c2016-08-26 15:12:41 +01001809 /* 8000 series have everything even at 40G */
1810 if (nic_data->datapath_caps2 &
1811 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
1812 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1813 } else {
Edward Cree4bae9132013-09-27 18:52:49 +01001814 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
Edward Cree69b365c2016-08-26 15:12:41 +01001815 }
Edward Cree568d7a02013-09-25 17:32:09 +01001816
1817 if (nic_data->datapath_caps &
1818 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1819 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1820
Edward Cree4bae9132013-09-27 18:52:49 +01001821 return raw_mask;
1822}
1823
1824static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1825{
Daniel Pieczkod94619c2015-06-02 11:40:05 +01001826 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001827 u64 raw_mask[2];
1828
1829 raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1830
Daniel Pieczkod94619c2015-06-02 11:40:05 +01001831 /* Only show vadaptor stats when EVB capability is present */
1832 if (nic_data->datapath_caps &
1833 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1834 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
Edward Creef411b542017-12-21 09:00:36 +00001835 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
Daniel Pieczkod94619c2015-06-02 11:40:05 +01001836 } else {
1837 raw_mask[1] = 0;
1838 }
Edward Creef411b542017-12-21 09:00:36 +00001839 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
1840 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
1841 raw_mask[1] |= EF10_FEC_STAT_MASK;
Edward Cree4bae9132013-09-27 18:52:49 +01001842
Bert Kenward2c0b6ee2017-12-21 09:00:41 +00001843 /* CTPIO stats appear in V3. Only show them on devices that actually
1844 * support CTPIO. Although this driver doesn't use CTPIO others might,
1845 * and we may be reporting the stats for the underlying port.
1846 */
1847 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
1848 (nic_data->datapath_caps2 &
1849 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
1850 raw_mask[1] |= EF10_CTPIO_STAT_MASK;
1851
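	/* Fold the two 64-bit raw mask words into the bitmap of
	 * unsigned longs: one word per u64 on 64-bit builds, two 32-bit
	 * words per u64 on 32-bit builds (three words suffice there,
	 * since EF10_STAT_COUNT is no more than 96).
	 */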
Edward Cree4bae9132013-09-27 18:52:49 +01001852#if BITS_PER_LONG == 64
Andrew Rybchenkoe70c70c32016-08-26 11:19:34 +01001853 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001854 mask[0] = raw_mask[0];
1855 mask[1] = raw_mask[1];
Edward Cree4bae9132013-09-27 18:52:49 +01001856#else
Andrew Rybchenkoe70c70c32016-08-26 11:19:34 +01001857 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001858 mask[0] = raw_mask[0] & 0xffffffff;
1859 mask[1] = raw_mask[0] >> 32;
1860 mask[2] = raw_mask[1] & 0xffffffff;
Edward Cree4bae9132013-09-27 18:52:49 +01001861#endif
Ben Hutchings8127d662013-08-29 19:19:29 +01001862}
1863
1864static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1865{
Edward Cree4bae9132013-09-27 18:52:49 +01001866 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1867
1868 efx_ef10_get_stat_mask(efx, mask);
Ben Hutchings8127d662013-08-29 19:19:29 +01001869 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
Edward Cree4bae9132013-09-27 18:52:49 +01001870 mask, names);
Ben Hutchings8127d662013-08-29 19:19:29 +01001871}
1872
Daniel Pieczkod7788192015-06-02 11:39:20 +01001873static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
1874 struct rtnl_link_stats64 *core_stats)
1875{
1876 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1877 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1878 u64 *stats = nic_data->stats;
1879 size_t stats_count = 0, index;
1880
1881 efx_ef10_get_stat_mask(efx, mask);
1882
1883 if (full_stats) {
1884 for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1885 if (efx_ef10_stat_desc[index].name) {
1886 *full_stats++ = stats[index];
1887 ++stats_count;
1888 }
1889 }
1890 }
1891
Bert Kenwardfbe43072015-08-26 16:39:03 +01001892 if (!core_stats)
1893 return stats_count;
1894
1895 if (nic_data->datapath_caps &
1896 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1897 /* Use vadaptor stats. */
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01001898 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1899 stats[EF10_STAT_rx_multicast] +
1900 stats[EF10_STAT_rx_broadcast];
1901 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1902 stats[EF10_STAT_tx_multicast] +
1903 stats[EF10_STAT_tx_broadcast];
1904 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1905 stats[EF10_STAT_rx_multicast_bytes] +
1906 stats[EF10_STAT_rx_broadcast_bytes];
1907 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1908 stats[EF10_STAT_tx_multicast_bytes] +
1909 stats[EF10_STAT_tx_broadcast_bytes];
1910 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
Daniel Pieczkod7788192015-06-02 11:39:20 +01001911 stats[GENERIC_STAT_rx_noskb_drops];
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01001912 core_stats->multicast = stats[EF10_STAT_rx_multicast];
1913 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1914 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1915 core_stats->rx_errors = core_stats->rx_crc_errors;
1916 core_stats->tx_errors = stats[EF10_STAT_tx_bad];
Bert Kenwardfbe43072015-08-26 16:39:03 +01001917 } else {
1918 /* Use port stats. */
1919 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1920 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1921 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1922 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1923 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1924 stats[GENERIC_STAT_rx_nodesc_trunc] +
1925 stats[GENERIC_STAT_rx_noskb_drops];
1926 core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1927 core_stats->rx_length_errors =
1928 stats[EF10_STAT_port_rx_gtjumbo] +
1929 stats[EF10_STAT_port_rx_length_error];
1930 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1931 core_stats->rx_frame_errors =
1932 stats[EF10_STAT_port_rx_align_error];
1933 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1934 core_stats->rx_errors = (core_stats->rx_length_errors +
1935 core_stats->rx_crc_errors +
1936 core_stats->rx_frame_errors);
Daniel Pieczkod7788192015-06-02 11:39:20 +01001937 }
1938
1939 return stats_count;
1940}
1941
1942static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01001943{
1944 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Edward Cree4bae9132013-09-27 18:52:49 +01001945 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
Ben Hutchings8127d662013-08-29 19:19:29 +01001946 __le64 generation_start, generation_end;
1947 u64 *stats = nic_data->stats;
1948 __le64 *dma_stats;
1949
Edward Cree4bae9132013-09-27 18:52:49 +01001950 efx_ef10_get_stat_mask(efx, mask);
1951
Ben Hutchings8127d662013-08-29 19:19:29 +01001952 dma_stats = efx->stats_buffer.addr;
Ben Hutchings8127d662013-08-29 19:19:29 +01001953
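	/* The stats buffer is bracketed by generation counts which the
	 * MC updates on either side of each stats write.  Read the end
	 * count first and the start count after copying: if the two
	 * differ, the copy raced with an update and must be retried.
	 */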
Edward Creec1be4822017-12-21 09:00:26 +00001954 generation_end = dma_stats[efx->num_mac_stats - 1];
Ben Hutchings8127d662013-08-29 19:19:29 +01001955 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1956 return 0;
1957 rmb();
Edward Cree4bae9132013-09-27 18:52:49 +01001958 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
Ben Hutchings8127d662013-08-29 19:19:29 +01001959 stats, efx->stats_buffer.addr, false);
Jon Cooperd546a892013-09-27 18:26:30 +01001960 rmb();
Ben Hutchings8127d662013-08-29 19:19:29 +01001961 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1962 if (generation_end != generation_start)
1963 return -EAGAIN;
1964
1965 /* Update derived statistics */
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001966 efx_nic_fix_nodesc_drop_stat(efx,
1967 &stats[EF10_STAT_port_rx_nodesc_drops]);
1968 stats[EF10_STAT_port_rx_good_bytes] =
1969 stats[EF10_STAT_port_rx_bytes] -
1970 stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1971 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1972 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
Edward Creee4d112e2014-07-15 11:58:12 +01001973 efx_update_sw_stats(efx, stats);
Ben Hutchings8127d662013-08-29 19:19:29 +01001974 return 0;
1975}
1976
1977
Daniel Pieczkod7788192015-06-02 11:39:20 +01001978static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1979 struct rtnl_link_stats64 *core_stats)
Ben Hutchings8127d662013-08-29 19:19:29 +01001980{
Ben Hutchings8127d662013-08-29 19:19:29 +01001981 int retry;
1982
1983 /* If we're unlucky enough to read statistics during the DMA, wait
1984 * up to 10ms for it to finish (typically takes <500us)
1985 */
1986 for (retry = 0; retry < 100; ++retry) {
Daniel Pieczkod7788192015-06-02 11:39:20 +01001987 if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
Ben Hutchings8127d662013-08-29 19:19:29 +01001988 break;
1989 udelay(100);
1990 }
1991
Daniel Pieczkod7788192015-06-02 11:39:20 +01001992 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1993}
1994
1995static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1996{
1997 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1998 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1999 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
2000 __le64 generation_start, generation_end;
2001 u64 *stats = nic_data->stats;
Edward Creec1be4822017-12-21 09:00:26 +00002002 u32 dma_len = efx->num_mac_stats * sizeof(u64);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002003 struct efx_buffer stats_buf;
2004 __le64 *dma_stats;
2005 int rc;
2006
Daniel Pieczkof00bf232015-06-02 11:40:18 +01002007 spin_unlock_bh(&efx->stats_lock);
2008
2009 if (in_interrupt()) {
2010 /* If in atomic context, cannot update stats. Just update the
2011 * software stats and return so the caller can continue.
2012 */
2013 spin_lock_bh(&efx->stats_lock);
2014 efx_update_sw_stats(efx, stats);
2015 return 0;
2016 }
2017
Daniel Pieczkod7788192015-06-02 11:39:20 +01002018 efx_ef10_get_stat_mask(efx, mask);
2019
2020 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
Daniel Pieczkof00bf232015-06-02 11:40:18 +01002021 if (rc) {
2022 spin_lock_bh(&efx->stats_lock);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002023 return rc;
Daniel Pieczkof00bf232015-06-02 11:40:18 +01002024 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01002025
2026 dma_stats = stats_buf.addr;
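	/* Pre-mark the generation word invalid so we can tell whether
	 * the MC has DMA'd fresh stats before we look at them.
	 */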
Edward Creec1be4822017-12-21 09:00:26 +00002027 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
Daniel Pieczkod7788192015-06-02 11:39:20 +01002028
2029 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
2030 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01002031 MAC_STATS_IN_DMA, 1);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002032 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
2033 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
2034
Daniel Pieczko6dd48592015-06-02 11:39:49 +01002035 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
2036 NULL, 0, NULL);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002037 spin_lock_bh(&efx->stats_lock);
Daniel Pieczko6dd48592015-06-02 11:39:49 +01002038 if (rc) {
2039 /* Expect ENOENT if DMA queues have not been set up */
2040 if (rc != -ENOENT || atomic_read(&efx->active_queues))
2041 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
2042 sizeof(inbuf), NULL, 0, rc);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002043 goto out;
Daniel Pieczko6dd48592015-06-02 11:39:49 +01002044 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01002045
Edward Creec1be4822017-12-21 09:00:26 +00002046 generation_end = dma_stats[efx->num_mac_stats - 1];
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01002047 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
2048 WARN_ON_ONCE(1);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002049 goto out;
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01002050 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01002051 rmb();
2052 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
2053 stats, stats_buf.addr, false);
2054 rmb();
2055 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
2056 if (generation_end != generation_start) {
2057 rc = -EAGAIN;
2058 goto out;
Ben Hutchings8127d662013-08-29 19:19:29 +01002059 }
2060
Daniel Pieczkod7788192015-06-02 11:39:20 +01002061 efx_update_sw_stats(efx, stats);
2062out:
2063 efx_nic_free_buffer(efx, &stats_buf);
2064 return rc;
2065}
Ben Hutchings8127d662013-08-29 19:19:29 +01002066
Daniel Pieczkod7788192015-06-02 11:39:20 +01002067static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
2068 struct rtnl_link_stats64 *core_stats)
2069{
2070 if (efx_ef10_try_update_nic_stats_vf(efx))
2071 return 0;
2072
2073 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
Ben Hutchings8127d662013-08-29 19:19:29 +01002074}
2075
2076static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
2077{
2078 struct efx_nic *efx = channel->efx;
Bert Kenward539de7c2016-08-11 13:02:09 +01002079 unsigned int mode, usecs;
Ben Hutchings8127d662013-08-29 19:19:29 +01002080 efx_dword_t timer_cmd;
2081
Bert Kenward539de7c2016-08-11 13:02:09 +01002082 if (channel->irq_moderation_us) {
Ben Hutchings8127d662013-08-29 19:19:29 +01002083 mode = 3;
Bert Kenward539de7c2016-08-11 13:02:09 +01002084 usecs = channel->irq_moderation_us;
Ben Hutchings8127d662013-08-29 19:19:29 +01002085 } else {
2086 mode = 0;
Bert Kenward539de7c2016-08-11 13:02:09 +01002087 usecs = 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01002088 }
2089
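	/* There are three ways to program the timer, depending on
	 * firmware workarounds: via MC_CMD_SET_EVQ_TMR (bug 61265), via
	 * the indirect EVQ doorbell page (bug 35388), or by writing the
	 * EVQ timer register directly.
	 */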
Bert Kenward539de7c2016-08-11 13:02:09 +01002090 if (EFX_EF10_WORKAROUND_61265(efx)) {
2091 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
2092 unsigned int ns = usecs * 1000;
2093
2094 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
2095 channel->channel);
2096 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
2097 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
2098 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
2099
2100 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
2101 inbuf, sizeof(inbuf), 0, NULL, 0);
2102 } else if (EFX_EF10_WORKAROUND_35388(efx)) {
2103 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2104
Ben Hutchings8127d662013-08-29 19:19:29 +01002105 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
2106 EFE_DD_EVQ_IND_TIMER_FLAGS,
2107 ERF_DD_EVQ_IND_TIMER_MODE, mode,
Bert Kenward539de7c2016-08-11 13:02:09 +01002108 ERF_DD_EVQ_IND_TIMER_VAL, ticks);
Ben Hutchings8127d662013-08-29 19:19:29 +01002109 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
2110 channel->channel);
2111 } else {
Bert Kenward539de7c2016-08-11 13:02:09 +01002112 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2113
Bert Kenward0bc959a2017-12-18 16:57:41 +00002114 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
2115 ERF_DZ_TC_TIMER_VAL, ticks,
2116 ERF_FZ_TC_TMR_REL_VAL, ticks);
Ben Hutchings8127d662013-08-29 19:19:29 +01002117 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
2118 channel->channel);
2119 }
2120}
2121
Shradha Shah02246a72015-05-06 00:58:14 +01002122static void efx_ef10_get_wol_vf(struct efx_nic *efx,
2123 struct ethtool_wolinfo *wol) {}
2124
2125static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
2126{
2127 return -EOPNOTSUPP;
2128}
2129
Ben Hutchings8127d662013-08-29 19:19:29 +01002130static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2131{
2132 wol->supported = 0;
2133 wol->wolopts = 0;
2134 memset(&wol->sopass, 0, sizeof(wol->sopass));
2135}
2136
2137static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
2138{
2139 if (type != 0)
2140 return -EINVAL;
2141 return 0;
2142}
2143
2144static void efx_ef10_mcdi_request(struct efx_nic *efx,
2145 const efx_dword_t *hdr, size_t hdr_len,
2146 const efx_dword_t *sdu, size_t sdu_len)
2147{
2148 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2149 u8 *pdu = nic_data->mcdi_buf.addr;
2150
2151 memcpy(pdu, hdr, hdr_len);
2152 memcpy(pdu + hdr_len, sdu, sdu_len);
2153 wmb();
2154
2155 /* The hardware provides 'low' and 'high' (doorbell) registers
2156 * for passing the 64-bit address of an MCDI request to
2157 * firmware. However the dwords are swapped by firmware. The
2158 * least significant bits of the doorbell are then 0 for all
2159 * MCDI requests due to alignment.
2160 */
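	/* Hence the high dword goes to the 'low' register and vice
	 * versa; e.g. for a buffer at 0x000000012345f000, LWRD is
	 * written as 0x00000001 and HWRD as 0x2345f000.
	 */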
2161 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
2162 ER_DZ_MC_DB_LWRD);
2163 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
2164 ER_DZ_MC_DB_HWRD);
2165}
2166
2167static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
2168{
2169 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2170 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
2171
2172 rmb();
2173 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
2174}
2175
2176static void
2177efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
2178 size_t offset, size_t outlen)
2179{
2180 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2181 const u8 *pdu = nic_data->mcdi_buf.addr;
2182
2183 memcpy(outbuf, pdu + offset, outlen);
2184}
2185
Daniel Pieczkoc577e592015-10-09 10:40:35 +01002186static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
2187{
2188 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2189
2190 /* All our allocations have been reset */
2191 efx_ef10_reset_mc_allocations(efx);
2192
2193 /* The datapath firmware might have been changed */
2194 nic_data->must_check_datapath_caps = true;
2195
2196 /* MAC statistics have been cleared on the NIC; clear the local
2197 * statistic that we update with efx_update_diff_stat().
2198 */
2199 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
2200}
2201
Ben Hutchings8127d662013-08-29 19:19:29 +01002202static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
2203{
2204 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2205 int rc;
2206
2207 rc = efx_ef10_get_warm_boot_count(efx);
2208 if (rc < 0) {
2209 /* The firmware is presumably in the process of
2210 * rebooting. However, we are supposed to report each
2211 * reboot just once, so we must only do that once we
2212 * can read and store the updated warm boot count.
2213 */
2214 return 0;
2215 }
2216
2217 if (rc == nic_data->warm_boot_count)
2218 return 0;
2219
2220 nic_data->warm_boot_count = rc;
Daniel Pieczkoc577e592015-10-09 10:40:35 +01002221 efx_ef10_mcdi_reboot_detected(efx);
Ben Hutchings869070c2013-09-05 22:46:10 +01002222
Ben Hutchings8127d662013-08-29 19:19:29 +01002223 return -EIO;
2224}
2225
2226/* Handle an MSI interrupt
2227 *
2228 * Handle an MSI hardware interrupt. This routine schedules event
2229 * queue processing. No interrupt acknowledgement cycle is necessary.
2230 * Also, we never need to check that the interrupt is for us, since
2231 * MSI interrupts cannot be shared.
2232 */
2233static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
2234{
2235 struct efx_msi_context *context = dev_id;
2236 struct efx_nic *efx = context->efx;
2237
2238 netif_vdbg(efx, intr, efx->net_dev,
2239 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
2240
Mark Rutland6aa7de02017-10-23 14:07:29 -07002241 if (likely(READ_ONCE(efx->irq_soft_enabled))) {
Ben Hutchings8127d662013-08-29 19:19:29 +01002242 /* Note test interrupts */
2243 if (context->index == efx->irq_level)
2244 efx->last_irq_cpu = raw_smp_processor_id();
2245
2246 /* Schedule processing of the channel */
2247 efx_schedule_channel_irq(efx->channel[context->index]);
2248 }
2249
2250 return IRQ_HANDLED;
2251}
2252
2253static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
2254{
2255 struct efx_nic *efx = dev_id;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002256 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
Ben Hutchings8127d662013-08-29 19:19:29 +01002257 struct efx_channel *channel;
2258 efx_dword_t reg;
2259 u32 queues;
2260
2261 /* Read the ISR which also ACKs the interrupts */
2262 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
2263 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
2264
2265 if (queues == 0)
2266 return IRQ_NONE;
2267
2268 if (likely(soft_enabled)) {
2269 /* Note test interrupts */
2270 if (queues & (1U << efx->irq_level))
2271 efx->last_irq_cpu = raw_smp_processor_id();
2272
2273 efx_for_each_channel(channel, efx) {
2274 if (queues & 1)
2275 efx_schedule_channel_irq(channel);
2276 queues >>= 1;
2277 }
2278 }
2279
2280 netif_vdbg(efx, intr, efx->net_dev,
2281 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
2282 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
2283
2284 return IRQ_HANDLED;
2285}
2286
Jon Cooper942e2982016-08-26 15:13:30 +01002287static int efx_ef10_irq_test_generate(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01002288{
2289 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
2290
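	/* If the firmware accepts the BUG41750 workaround, this is
	 * presumably a platform on which MC_CMD_TRIGGER_INTERRUPT does
	 * not work, so report the interrupt self-test as unsupported.
	 */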
Jon Cooper942e2982016-08-26 15:13:30 +01002291 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
2292 NULL) == 0)
2293 return -ENOTSUPP;
2294
Ben Hutchings8127d662013-08-29 19:19:29 +01002295 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
2296
2297 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
Jon Cooper942e2982016-08-26 15:13:30 +01002298 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
Ben Hutchings8127d662013-08-29 19:19:29 +01002299 inbuf, sizeof(inbuf), NULL, 0, NULL);
2300}
2301
2302static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
2303{
2304 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
2305 (tx_queue->ptr_mask + 1) *
2306 sizeof(efx_qword_t),
2307 GFP_KERNEL);
2308}
2309
2310/* This writes to the TX_DESC_WPTR and also pushes data */
2311static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
2312 const efx_qword_t *txd)
2313{
2314 unsigned int write_ptr;
2315 efx_oword_t reg;
2316
2317 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2318 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
2319 reg.qword[0] = *txd;
2320 efx_writeo_page(tx_queue->efx, &reg,
2321 ER_DZ_TX_DESC_UPD, tx_queue->queue);
2322}
2323
Bert Kenwarde9117e52016-11-17 10:51:54 +00002324/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
 * TSOv2 prefixes each packet with a pair of option descriptors:
 * FATSO2A carrying the initial IPv4 ID and TCP sequence number, and
 * FATSO2B carrying the MSS.
 2325 */
2326static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
2327 struct sk_buff *skb,
2328 bool *data_mapped)
2329{
2330 struct efx_tx_buffer *buffer;
2331 struct tcphdr *tcp;
2332 struct iphdr *ip;
2333
2334 u16 ipv4_id;
2335 u32 seqnum;
2336 u32 mss;
2337
Edward Creee01b16a2016-12-02 15:51:33 +00002338 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
Bert Kenwarde9117e52016-11-17 10:51:54 +00002339
2340 mss = skb_shinfo(skb)->gso_size;
2341
2342 if (unlikely(mss < 4)) {
2343 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
2344 return -EINVAL;
2345 }
2346
2347 ip = ip_hdr(skb);
2348 if (ip->version == 4) {
2349 /* Modify IPv4 header if needed. */
2350 ip->tot_len = 0;
2351 ip->check = 0;
Edward Cree6d431312017-03-03 15:22:27 +00002352 ipv4_id = ntohs(ip->id);
Bert Kenwarde9117e52016-11-17 10:51:54 +00002353 } else {
2354 /* Modify IPv6 header if needed. */
2355 struct ipv6hdr *ipv6 = ipv6_hdr(skb);
2356
2357 ipv6->payload_len = 0;
2358 ipv4_id = 0;
2359 }
2360
2361 tcp = tcp_hdr(skb);
2362 seqnum = ntohl(tcp->seq);
2363
2364 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2365
2366 buffer->flags = EFX_TX_BUF_OPTION;
2367 buffer->len = 0;
2368 buffer->unmap_len = 0;
2369 EFX_POPULATE_QWORD_5(buffer->option,
2370 ESF_DZ_TX_DESC_IS_OPT, 1,
2371 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2372 ESF_DZ_TX_TSO_OPTION_TYPE,
2373 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2374 ESF_DZ_TX_TSO_IP_ID, ipv4_id,
2375 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
2376 );
2377 ++tx_queue->insert_count;
2378
2379 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2380
2381 buffer->flags = EFX_TX_BUF_OPTION;
2382 buffer->len = 0;
2383 buffer->unmap_len = 0;
2384 EFX_POPULATE_QWORD_4(buffer->option,
2385 ESF_DZ_TX_DESC_IS_OPT, 1,
2386 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2387 ESF_DZ_TX_TSO_OPTION_TYPE,
2388 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2389 ESF_DZ_TX_TSO_TCP_MSS, mss
2390 );
2391 ++tx_queue->insert_count;
2392
2393 return 0;
2394}
2395
Edward Cree46d1efd2016-11-17 10:52:36 +00002396static u32 efx_ef10_tso_versions(struct efx_nic *efx)
2397{
2398 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2399 u32 tso_versions = 0;
2400
2401 if (nic_data->datapath_caps &
2402 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
2403 tso_versions |= BIT(1);
2404 if (nic_data->datapath_caps2 &
2405 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
2406 tso_versions |= BIT(2);
2407 return tso_versions;
2408}
2409
Ben Hutchings8127d662013-08-29 19:19:29 +01002410static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
2411{
2412 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2413 EFX_BUF_SIZE));
Ben Hutchings8127d662013-08-29 19:19:29 +01002414 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
2415 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
2416 struct efx_channel *channel = tx_queue->channel;
2417 struct efx_nic *efx = tx_queue->efx;
Daniel Pieczko45b24492015-05-06 00:57:14 +01002418 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Bert Kenwarde9117e52016-11-17 10:51:54 +00002419 bool tso_v2 = false;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002420 size_t inlen;
Ben Hutchings8127d662013-08-29 19:19:29 +01002421 dma_addr_t dma_addr;
2422 efx_qword_t *txd;
2423 int rc;
2424 int i;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002425 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01002426
Martin Habets50663fe2018-01-25 17:25:33 +00002427 /* Only attempt to enable TX timestamping if we have the license for it,
2428 * otherwise TXQ init will fail
2429 */
2430 if (!(nic_data->licensed_features &
Martin Habets6aa47c82018-01-25 17:26:31 +00002431 (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
Martin Habets50663fe2018-01-25 17:25:33 +00002432 tx_queue->timestamping = false;
Martin Habets6aa47c82018-01-25 17:26:31 +00002433 /* Disable sync events on this channel. */
2434 if (efx->type->ptp_set_ts_sync_events)
2435 efx->type->ptp_set_ts_sync_events(efx, false, false);
2436 }
Martin Habets50663fe2018-01-25 17:25:33 +00002437
Bert Kenwarde9117e52016-11-17 10:51:54 +00002438 /* TSOv2 is a limited resource that can only be configured on a limited
2439 * number of queues. TSO without checksum offload is not really a thing,
2440 * so we only enable it for those queues.
Martin Habetsb9b603d42018-01-25 17:24:43 +00002441 * TSOv2 cannot be used with hardware timestamping.
Bert Kenwarde9117e52016-11-17 10:51:54 +00002442 */
2443 if (csum_offload && (nic_data->datapath_caps2 &
Martin Habetsb9b603d42018-01-25 17:24:43 +00002444 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
2445 !tx_queue->timestamping) {
Bert Kenwarde9117e52016-11-17 10:51:54 +00002446 tso_v2 = true;
2447 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
2448 channel->channel);
2449 }
2450
Ben Hutchings8127d662013-08-29 19:19:29 +01002451 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
2452 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
2453 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
2454 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
Ben Hutchings8127d662013-08-29 19:19:29 +01002455 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
Daniel Pieczko45b24492015-05-06 00:57:14 +01002456 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01002457
2458 dma_addr = tx_queue->txd.buf.dma_addr;
2459
2460 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
2461 tx_queue->queue, entries, (u64)dma_addr);
2462
2463 for (i = 0; i < entries; ++i) {
2464 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
2465 dma_addr += EFX_BUF_SIZE;
2466 }
2467
2468 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
2469
Edward Creee638ee12016-11-17 10:52:07 +00002470 do {
Martin Habetsb9b603d42018-01-25 17:24:43 +00002471 MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
Edward Creee638ee12016-11-17 10:52:07 +00002472 /* This flag was removed from mcdi_pcol.h for
2473 * the non-_EXT version of INIT_TXQ. However,
2474 * firmware still honours it.
2475 */
2476 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
2477 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
Martin Habetsb9b603d42018-01-25 17:24:43 +00002478 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
2479 INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
2480 tx_queue->timestamping);
Edward Creee638ee12016-11-17 10:52:07 +00002481
2482 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
2483 NULL, 0, NULL);
2484 if (rc == -ENOSPC && tso_v2) {
2485 /* Retry without TSOv2 if we're short on contexts. */
2486 tso_v2 = false;
2487 netif_warn(efx, probe, efx->net_dev,
2488 "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
2489 } else if (rc) {
2490 efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
2491 MC_CMD_INIT_TXQ_EXT_IN_LEN,
2492 NULL, 0, rc);
2493 goto fail;
2494 }
2495 } while (rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01002496
2497 /* A previous user of this TX queue might have set us up the
2498 * bomb by writing a descriptor to the TX push collector but
2499 * not the doorbell. (Each collector belongs to a port, not a
2500 * queue or function, so cannot easily be reset.) We must
2501 * attempt to push a no-op descriptor in its place.
2502 */
2503 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2504 tx_queue->insert_count = 1;
2505 txd = efx_tx_desc(tx_queue, 0);
Martin Habetsb9b603d42018-01-25 17:24:43 +00002506 EFX_POPULATE_QWORD_5(*txd,
Ben Hutchings8127d662013-08-29 19:19:29 +01002507 ESF_DZ_TX_DESC_IS_OPT, true,
2508 ESF_DZ_TX_OPTION_TYPE,
2509 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2510 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
Martin Habetsb9b603d42018-01-25 17:24:43 +00002511 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
2512 ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
Ben Hutchings8127d662013-08-29 19:19:29 +01002513 tx_queue->write_count = 1;
Bert Kenward93171b12015-11-30 09:05:35 +00002514
Bert Kenwarde9117e52016-11-17 10:51:54 +00002515 if (tso_v2) {
2516 tx_queue->handle_tso = efx_ef10_tx_tso_desc;
2517 tx_queue->tso_version = 2;
2518 } else if (nic_data->datapath_caps &
2519 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
Bert Kenward93171b12015-11-30 09:05:35 +00002520 tx_queue->tso_version = 1;
2521 }
2522
Ben Hutchings8127d662013-08-29 19:19:29 +01002523 wmb();
2524 efx_ef10_push_tx_desc(tx_queue, txd);
2525
2526 return;
2527
2528fail:
Ben Hutchings48ce5632013-11-01 16:42:44 +00002529 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2530 tx_queue->queue);
Ben Hutchings8127d662013-08-29 19:19:29 +01002531}
2532
2533static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
2534{
2535 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002536 MCDI_DECLARE_BUF_ERR(outbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01002537 struct efx_nic *efx = tx_queue->efx;
2538 size_t outlen;
2539 int rc;
2540
2541 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
2542 tx_queue->queue);
2543
Edward Cree1e0b8122013-05-31 18:36:12 +01002544 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01002545 outbuf, sizeof(outbuf), &outlen);
2546
2547 if (rc && rc != -EALREADY)
2548 goto fail;
2549
2550 return;
2551
2552fail:
Edward Cree1e0b8122013-05-31 18:36:12 +01002553 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
2554 outbuf, outlen, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01002555}
2556
2557static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
2558{
2559 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
2560}
2561
2562/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
2563static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2564{
2565 unsigned int write_ptr;
2566 efx_dword_t reg;
2567
2568 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2569 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2570 efx_writed_page(tx_queue->efx, &reg,
2571 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2572}
2573
Bert Kenwarde9117e52016-11-17 10:51:54 +00002574#define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
2575
2576static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
2577 dma_addr_t dma_addr, unsigned int len)
2578{
2579 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
2580 /* If we need to break across multiple descriptors we should
2581 * stop at a page boundary. This assumes the length limit is
2582 * greater than the page size.
2583 */
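		/* Worked example, assuming a 4KB EFX_PAGE_SIZE: a
		 * fragment at dma_addr 0x12340800 gives end 0x123447ff,
		 * which rounds down to 0x12344000, so this descriptor
		 * covers len 0x3800 and the caller continues from the
		 * page boundary.
		 */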
2584 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
2585
2586 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
2587 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
2588 }
2589
2590 return len;
2591}
2592
Ben Hutchings8127d662013-08-29 19:19:29 +01002593static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2594{
2595 unsigned int old_write_count = tx_queue->write_count;
2596 struct efx_tx_buffer *buffer;
2597 unsigned int write_ptr;
2598 efx_qword_t *txd;
2599
Martin Habetsb2663a42015-11-02 12:51:31 +00002600 tx_queue->xmit_more_available = false;
2601 if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2602 return;
Ben Hutchings8127d662013-08-29 19:19:29 +01002603
2604 do {
2605 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2606 buffer = &tx_queue->buffer[write_ptr];
2607 txd = efx_tx_desc(tx_queue, write_ptr);
2608 ++tx_queue->write_count;
2609
2610 /* Create TX descriptor ring entry */
2611 if (buffer->flags & EFX_TX_BUF_OPTION) {
2612 *txd = buffer->option;
Edward Creede1deff2017-01-13 21:20:14 +00002613 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
2614 /* PIO descriptor */
2615 tx_queue->packet_write_count = tx_queue->write_count;
Ben Hutchings8127d662013-08-29 19:19:29 +01002616 } else {
Edward Creede1deff2017-01-13 21:20:14 +00002617 tx_queue->packet_write_count = tx_queue->write_count;
Ben Hutchings8127d662013-08-29 19:19:29 +01002618 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2619 EFX_POPULATE_QWORD_3(
2620 *txd,
2621 ESF_DZ_TX_KER_CONT,
2622 buffer->flags & EFX_TX_BUF_CONT,
2623 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2624 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2625 }
2626 } while (tx_queue->write_count != tx_queue->insert_count);
2627
2628 wmb(); /* Ensure descriptors are written before they are fetched */
2629
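	/* Where allowed, push the first new descriptor in the doorbell
	 * write itself to cut latency; otherwise just advance the write
	 * pointer and let the NIC fetch the descriptors itself.
	 */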
2630 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2631 txd = efx_tx_desc(tx_queue,
2632 old_write_count & tx_queue->ptr_mask);
2633 efx_ef10_push_tx_desc(tx_queue, txd);
2634 ++tx_queue->pushes;
2635 } else {
2636 efx_ef10_notify_tx_desc(tx_queue);
2637 }
2638}
2639
Edward Creea33a4c72016-11-03 22:12:27 +00002640#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
2641 1 << RSS_MODE_HASH_DST_ADDR_LBN)
2642#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
2643 1 << RSS_MODE_HASH_DST_PORT_LBN)
2644#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
2645 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
2646 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
2647 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
2648 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
2649 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
2650 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
2651 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
2652 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
2653 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
2654
2655static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
2656{
2657 /* Firmware had a bug (sfc bug 61952) where it would not actually
2658 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
2659 * This meant that it would always contain whatever was previously
2660 * in the MCDI buffer. Fortunately, all firmware versions with
2661 * this bug have the same default flags value for a newly-allocated
2662 * RSS context, and the only time we want to get the flags is just
2663 * after allocating. Moreover, the response has a 32-bit hole
2664 * where the context ID would be in the request, so we can use an
2665 * overlength buffer in the request and pre-fill the flags field
2666 * with what we believe the default to be. Thus if the firmware
2667 * has the bug, it will leave our pre-filled value in the flags
2668 * field of the response, and we will get the right answer.
2669 *
2670 * However, this does mean that this function should NOT be used if
2671 * the RSS context flags might not be their defaults - it is ONLY
2672 * reliably correct for a newly-allocated RSS context.
2673 */
2674 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2675 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2676 size_t outlen;
2677 int rc;
2678
2679 /* Check we have a hole for the context ID */
2680 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
2681 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
2682 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
2683 RSS_CONTEXT_FLAGS_DEFAULT);
2684 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
2685 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
2686 if (rc == 0) {
2687 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
2688 rc = -EIO;
2689 else
2690 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
2691 }
2692 return rc;
2693}
2694
2695/* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
2696 * If we fail, we just leave the RSS context at its default hash settings,
2697 * which is safe but may slightly reduce performance.
2698 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
2699 * just need to set the UDP ports flags (for both IP versions).
2700 */
Edward Cree42356d92018-03-08 15:45:17 +00002701static void efx_ef10_set_rss_flags(struct efx_nic *efx,
2702 struct efx_rss_context *ctx)
Edward Creea33a4c72016-11-03 22:12:27 +00002703{
2704 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
2705 u32 flags;
2706
2707 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
2708
Edward Cree42356d92018-03-08 15:45:17 +00002709 if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0)
Edward Creea33a4c72016-11-03 22:12:27 +00002710 return;
Edward Cree42356d92018-03-08 15:45:17 +00002711 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
2712 ctx->context_id);
Edward Creea33a4c72016-11-03 22:12:27 +00002713 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
2714 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
2715 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
Edward Creeb718c882016-11-03 22:12:58 +00002716 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
2717 NULL, 0, NULL))
2718 /* Succeeded, so UDP 4-tuple is now enabled */
Edward Cree42356d92018-03-08 15:45:17 +00002719 ctx->rx_hash_udp_4tuple = true;
Edward Creea33a4c72016-11-03 22:12:27 +00002720}
2721
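/* Allocate an RSS context, either exclusive to this function or shared.
 * A shared context is limited to EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE
 * queues, so the requested spread is rounded down to a power of two and
 * capped; with a spread of 1, RSS is not needed and no context is
 * allocated at all.
 */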
Edward Cree42356d92018-03-08 15:45:17 +00002722static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
2723 struct efx_rss_context *ctx,
2724 unsigned *context_size)
Ben Hutchings8127d662013-08-29 19:19:29 +01002725{
2726 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
2727 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
Daniel Pieczko45b24492015-05-06 00:57:14 +01002728 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01002729 size_t outlen;
2730 int rc;
Jon Cooper267c0152015-05-06 00:59:38 +01002731 u32 alloc_type = exclusive ?
2732 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
2733 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
2734 unsigned rss_spread = exclusive ?
2735 efx->rss_spread :
2736 min(rounddown_pow_of_two(efx->rss_spread),
2737 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
2738
2739 if (!exclusive && rss_spread == 1) {
Edward Cree42356d92018-03-08 15:45:17 +00002740 ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
Jon Cooper267c0152015-05-06 00:59:38 +01002741 if (context_size)
2742 *context_size = 1;
2743 return 0;
2744 }
Ben Hutchings8127d662013-08-29 19:19:29 +01002745
Jon Cooperdcb41232016-04-25 16:51:00 +01002746 if (nic_data->datapath_caps &
2747 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
2748 return -EOPNOTSUPP;
2749
Ben Hutchings8127d662013-08-29 19:19:29 +01002750 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
Daniel Pieczko45b24492015-05-06 00:57:14 +01002751 nic_data->vport_id);
Jon Cooper267c0152015-05-06 00:59:38 +01002752 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
2753 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
Ben Hutchings8127d662013-08-29 19:19:29 +01002754
2755 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
2756 outbuf, sizeof(outbuf), &outlen);
2757 if (rc != 0)
2758 return rc;
2759
2760 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
2761 return -EIO;
2762
Edward Cree42356d92018-03-08 15:45:17 +00002763 ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
Ben Hutchings8127d662013-08-29 19:19:29 +01002764
Jon Cooper267c0152015-05-06 00:59:38 +01002765 if (context_size)
2766 *context_size = rss_spread;
2767
Edward Creea33a4c72016-11-03 22:12:27 +00002768 if (nic_data->datapath_caps &
2769 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
Edward Cree42356d92018-03-08 15:45:17 +00002770 efx_ef10_set_rss_flags(efx, ctx);
Edward Creea33a4c72016-11-03 22:12:27 +00002771
Ben Hutchings8127d662013-08-29 19:19:29 +01002772 return 0;
2773}
2774
Edward Cree42356d92018-03-08 15:45:17 +00002775static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
Ben Hutchings8127d662013-08-29 19:19:29 +01002776{
2777 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01002778
2779 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
2780 context);
Edward Cree42356d92018-03-08 15:45:17 +00002781 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01002782 NULL, 0, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01002783}
2784
Jon Cooper267c0152015-05-06 00:59:38 +01002785static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
Edward Creef74d1992017-01-17 12:01:53 +00002786 const u32 *rx_indir_table, const u8 *key)
Ben Hutchings8127d662013-08-29 19:19:29 +01002787{
2788 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
2789 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
2790 int i, rc;
2791
2792 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2793 context);
Edward Cree42356d92018-03-08 15:45:17 +00002794 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
Ben Hutchings8127d662013-08-29 19:19:29 +01002795 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2796
Edward Cree42356d92018-03-08 15:45:17 +00002797 /* This iterates over the length of efx->rss_context.rx_indir_table, but
2798 * copies bytes from rx_indir_table. That's because the latter is a
2799 * pointer rather than an array, but should have the same length.
2800 * The efx->rss_context.rx_hash_key loop below is similar.
Edward Creef74d1992017-01-17 12:01:53 +00002801 */
Edward Cree42356d92018-03-08 15:45:17 +00002802 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
Ben Hutchings8127d662013-08-29 19:19:29 +01002803 MCDI_PTR(tablebuf,
2804 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
Jon Cooper267c0152015-05-06 00:59:38 +01002805 (u8) rx_indir_table[i];
Ben Hutchings8127d662013-08-29 19:19:29 +01002806
2807 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2808 sizeof(tablebuf), NULL, 0, NULL);
2809 if (rc != 0)
2810 return rc;
2811
2812 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2813 context);
Edward Cree42356d92018-03-08 15:45:17 +00002814 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
Ben Hutchings8127d662013-08-29 19:19:29 +01002815 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
Edward Cree42356d92018-03-08 15:45:17 +00002816 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
Edward Creef74d1992017-01-17 12:01:53 +00002817 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
Ben Hutchings8127d662013-08-29 19:19:29 +01002818
2819 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2820 sizeof(keybuf), NULL, 0, NULL);
2821}
2822
2823static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2824{
Edward Cree42356d92018-03-08 15:45:17 +00002825 int rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01002826
Edward Cree42356d92018-03-08 15:45:17 +00002827 if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
2828 rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
2829 WARN_ON(rc != 0);
2830 }
2831 efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
Ben Hutchings8127d662013-08-29 19:19:29 +01002832}
2833
Jon Cooper267c0152015-05-06 00:59:38 +01002834static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2835 unsigned *context_size)
2836{
Jon Cooper267c0152015-05-06 00:59:38 +01002837 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Edward Cree42356d92018-03-08 15:45:17 +00002838 int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context,
2839 context_size);
Jon Cooper267c0152015-05-06 00:59:38 +01002840
2841 if (rc != 0)
2842 return rc;
2843
Jon Cooper267c0152015-05-06 00:59:38 +01002844 nic_data->rx_rss_context_exclusive = false;
Edward Cree42356d92018-03-08 15:45:17 +00002845 efx_set_default_rx_indir_table(efx, &efx->rss_context);
Jon Cooper267c0152015-05-06 00:59:38 +01002846 return 0;
2847}
2848
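/* Push an exclusive RSS configuration.  If we do not already own an
 * exclusive context, allocate one before populating it; the old
 * (shared or stale) context is only freed once the new one holds the
 * table and key, so a failed MCDI call unwinds to the previous context
 * through the fail1/fail2 paths below.
 */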
2849static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
Edward Creef74d1992017-01-17 12:01:53 +00002850 const u32 *rx_indir_table,
2851 const u8 *key)
Ben Hutchings8127d662013-08-29 19:19:29 +01002852{
Edward Cree42356d92018-03-08 15:45:17 +00002853 u32 old_rx_rss_context = efx->rss_context.context_id;
Ben Hutchings8127d662013-08-29 19:19:29 +01002854 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2855 int rc;
2856
Edward Cree42356d92018-03-08 15:45:17 +00002857 if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
Jon Cooper267c0152015-05-06 00:59:38 +01002858 !nic_data->rx_rss_context_exclusive) {
Edward Cree42356d92018-03-08 15:45:17 +00002859 rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
2860 NULL);
Jon Cooper267c0152015-05-06 00:59:38 +01002861 if (rc == -EOPNOTSUPP)
2862 return rc;
2863 else if (rc != 0)
2864 goto fail1;
Ben Hutchings8127d662013-08-29 19:19:29 +01002865 }
2866
Edward Cree42356d92018-03-08 15:45:17 +00002867 rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id,
Edward Creef74d1992017-01-17 12:01:53 +00002868 rx_indir_table, key);
Ben Hutchings8127d662013-08-29 19:19:29 +01002869 if (rc != 0)
Jon Cooper267c0152015-05-06 00:59:38 +01002870 goto fail2;
Ben Hutchings8127d662013-08-29 19:19:29 +01002871
Edward Cree42356d92018-03-08 15:45:17 +00002872 if (efx->rss_context.context_id != old_rx_rss_context &&
2873 old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2874 WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
Jon Cooper267c0152015-05-06 00:59:38 +01002875 nic_data->rx_rss_context_exclusive = true;
Edward Cree42356d92018-03-08 15:45:17 +00002876 if (rx_indir_table != efx->rss_context.rx_indir_table)
2877 memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
2878 sizeof(efx->rss_context.rx_indir_table));
2879 if (key != efx->rss_context.rx_hash_key)
2880 memcpy(efx->rss_context.rx_hash_key, key,
2881 efx->type->rx_hash_key_size);
Edward Creef74d1992017-01-17 12:01:53 +00002882
Jon Cooper267c0152015-05-06 00:59:38 +01002883 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01002884
Jon Cooper267c0152015-05-06 00:59:38 +01002885fail2:
Edward Cree42356d92018-03-08 15:45:17 +00002886 if (old_rx_rss_context != efx->rss_context.context_id) {
2887 WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0);
2888 efx->rss_context.context_id = old_rx_rss_context;
2889 }
Jon Cooper267c0152015-05-06 00:59:38 +01002890fail1:
Ben Hutchings8127d662013-08-29 19:19:29 +01002891 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Jon Cooper267c0152015-05-06 00:59:38 +01002892 return rc;
2893}
2894
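/* Push the indirection table and hash key of an arbitrary RSS context,
 * allocating a fresh exclusive context on the NIC if this one has not
 * been realised in firmware yet.  A NULL rx_indir_table is the delete
 * request: the firmware context is freed and nothing is written.
 */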
Edward Cree42356d92018-03-08 15:45:17 +00002895static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
2896 struct efx_rss_context *ctx,
2897 const u32 *rx_indir_table,
2898 const u8 *key)
Edward Creea707d182017-01-17 12:02:12 +00002899{
Edward Cree42356d92018-03-08 15:45:17 +00002900 int rc;
2901
2902 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
2903 rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
2904 if (rc)
2905 return rc;
2906 }
2907
2908 if (!rx_indir_table) /* Delete this context */
2909 return efx_ef10_free_rss_context(efx, ctx->context_id);
2910
2911 rc = efx_ef10_populate_rss_table(efx, ctx->context_id,
2912 rx_indir_table, key);
2913 if (rc)
2914 return rc;
2915
2916 memcpy(ctx->rx_indir_table, rx_indir_table,
2917 sizeof(efx->rss_context.rx_indir_table));
2918 memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
2919
2920 return 0;
2921}
2922
2923static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
2924 struct efx_rss_context *ctx)
2925{
Edward Creea707d182017-01-17 12:02:12 +00002926 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
2927 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
2928 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
2929 size_t outlen;
2930 int rc, i;
2931
2932 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
2933 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
2934
Edward Cree42356d92018-03-08 15:45:17 +00002935 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
Edward Creea707d182017-01-17 12:02:12 +00002936 return -ENOENT;
2937
2938 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
Edward Cree42356d92018-03-08 15:45:17 +00002939 ctx->context_id);
2940 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
Edward Creea707d182017-01-17 12:02:12 +00002941 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
2942 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
2943 tablebuf, sizeof(tablebuf), &outlen);
2944 if (rc != 0)
2945 return rc;
2946
2947 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
2948 return -EIO;
2949
Edward Cree42356d92018-03-08 15:45:17 +00002950 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
2951 ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
Edward Creea707d182017-01-17 12:02:12 +00002952 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
2953
2954 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
Edward Cree42356d92018-03-08 15:45:17 +00002955 ctx->context_id);
2956 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
Edward Creea707d182017-01-17 12:02:12 +00002957 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2958 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
2959 keybuf, sizeof(keybuf), &outlen);
2960 if (rc != 0)
2961 return rc;
2962
2963 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
2964 return -EIO;
2965
Edward Cree42356d92018-03-08 15:45:17 +00002966 for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
2967 ctx->rx_hash_key[i] = MCDI_PTR(
Edward Creea707d182017-01-17 12:02:12 +00002968 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
2969
2970 return 0;
2971}
2972
Edward Cree42356d92018-03-08 15:45:17 +00002973static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
2974{
2975 return efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
2976}
2977
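/* After an MC reboot all firmware-side RSS contexts are gone, so walk
 * the driver's list of custom contexts, mark each one unallocated and
 * rebuild it from the cached table and key.  Failure only warns, since
 * the worst case is that RSS filters pointing at the context fail.
 */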
2978static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
2979{
2980 struct efx_rss_context *ctx;
2981 int rc;
2982
2983 list_for_each_entry(ctx, &efx->rss_context.list, list) {
2984 /* previous NIC RSS context is gone */
2985 ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
2986 /* so try to allocate a new one */
2987 rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
2988 ctx->rx_indir_table,
2989 ctx->rx_hash_key);
2990 if (rc)
2991 netif_warn(efx, probe, efx->net_dev,
2992 "failed to restore RSS context %u, rc=%d"
2993 "; RSS filters may fail to be applied\n",
2994 ctx->user_id, rc);
2995 }
2996}
2997
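/* PF entry point for pushing RSS configuration.  Try for an exclusive
 * context first; if the firmware has run out (-ENOBUFS) and this was
 * not an explicit user request, fall back to a shared context, warning
 * if its size or default indirection differs from what was wanted.
 */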
Jon Cooper267c0152015-05-06 00:59:38 +01002998static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
Edward Creef74d1992017-01-17 12:01:53 +00002999 const u32 *rx_indir_table,
3000 const u8 *key)
Jon Cooper267c0152015-05-06 00:59:38 +01003001{
3002 int rc;
3003
3004 if (efx->rss_spread == 1)
3005 return 0;
3006
Edward Creef74d1992017-01-17 12:01:53 +00003007 if (!key)
Edward Cree42356d92018-03-08 15:45:17 +00003008 key = efx->rss_context.rx_hash_key;
Edward Creef74d1992017-01-17 12:01:53 +00003009
3010 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
Jon Cooper267c0152015-05-06 00:59:38 +01003011
3012 if (rc == -ENOBUFS && !user) {
3013 unsigned context_size;
3014 bool mismatch = false;
3015 size_t i;
3016
Edward Cree42356d92018-03-08 15:45:17 +00003017 for (i = 0;
3018 i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
Jon Cooper267c0152015-05-06 00:59:38 +01003019 i++)
3020 mismatch = rx_indir_table[i] !=
3021 ethtool_rxfh_indir_default(i, efx->rss_spread);
3022
3023 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
3024 if (rc == 0) {
3025 if (context_size != efx->rss_spread)
3026 netif_warn(efx, probe, efx->net_dev,
3027 "Could not allocate an exclusive RSS"
3028 " context; allocated a shared one of"
3029 " different size."
3030 " Wanted %u, got %u.\n",
3031 efx->rss_spread, context_size);
3032 else if (mismatch)
3033 netif_warn(efx, probe, efx->net_dev,
3034 "Could not allocate an exclusive RSS"
3035 " context; allocated a shared one but"
3036 " could not apply custom"
3037 " indirection.\n");
3038 else
3039 netif_info(efx, probe, efx->net_dev,
3040 "Could not allocate an exclusive RSS"
3041 " context; allocated a shared one.\n");
3042 }
3043 }
3044 return rc;
3045}
3046
3047static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
3048 const u32 *rx_indir_table
Edward Creef74d1992017-01-17 12:01:53 +00003049 __attribute__ ((unused)),
3050 const u8 *key
Jon Cooper267c0152015-05-06 00:59:38 +01003051 __attribute__ ((unused)))
3052{
Jon Cooper267c0152015-05-06 00:59:38 +01003053 if (user)
3054 return -EOPNOTSUPP;
Edward Cree42356d92018-03-08 15:45:17 +00003055 if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
Jon Cooper267c0152015-05-06 00:59:38 +01003056 return 0;
3057 return efx_ef10_rx_push_shared_rss_config(efx, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01003058}
3059
3060static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
3061{
3062 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
3063 (rx_queue->ptr_mask + 1) *
3064 sizeof(efx_qword_t),
3065 GFP_KERNEL);
3066}
3067
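/* Initialise an RX queue via MC_CMD_INIT_RXQ.  The descriptor ring is
 * described to the firmware as a list of EFX_BUF_SIZE chunks; assuming
 * the usual 4KB EFX_BUF_SIZE, a 4096-entry ring of 8-byte descriptors
 * takes 4096 * 8 / 4096 = 8 DMA addresses in the request.
 */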
3068static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
3069{
3070 MCDI_DECLARE_BUF(inbuf,
3071 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
3072 EFX_BUF_SIZE));
Ben Hutchings8127d662013-08-29 19:19:29 +01003073 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3074 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
3075 struct efx_nic *efx = rx_queue->efx;
Daniel Pieczko45b24492015-05-06 00:57:14 +01003076 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003077 size_t inlen;
Ben Hutchings8127d662013-08-29 19:19:29 +01003078 dma_addr_t dma_addr;
3079 int rc;
3080 int i;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003081 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01003082
3083 rx_queue->scatter_n = 0;
3084 rx_queue->scatter_len = 0;
3085
3086 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
3087 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
3088 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
3089 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
3090 efx_rx_queue_index(rx_queue));
Jon Cooperbd9a2652013-11-18 12:54:41 +00003091 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
3092 INIT_RXQ_IN_FLAG_PREFIX, 1,
3093 INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
Ben Hutchings8127d662013-08-29 19:19:29 +01003094 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
Daniel Pieczko45b24492015-05-06 00:57:14 +01003095 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01003096
3097 dma_addr = rx_queue->rxd.buf.dma_addr;
3098
3099 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
3100 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
3101
3102 for (i = 0; i < entries; ++i) {
3103 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
3104 dma_addr += EFX_BUF_SIZE;
3105 }
3106
3107 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
3108
3109 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003110 NULL, 0, NULL);
Ben Hutchings48ce5632013-11-01 16:42:44 +00003111 if (rc)
3112 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
3113 efx_rx_queue_index(rx_queue));
Ben Hutchings8127d662013-08-29 19:19:29 +01003114}
3115
3116static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
3117{
3118 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003119 MCDI_DECLARE_BUF_ERR(outbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01003120 struct efx_nic *efx = rx_queue->efx;
3121 size_t outlen;
3122 int rc;
3123
3124 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
3125 efx_rx_queue_index(rx_queue));
3126
Edward Cree1e0b8122013-05-31 18:36:12 +01003127 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01003128 outbuf, sizeof(outbuf), &outlen);
3129
3130 if (rc && rc != -EALREADY)
3131 goto fail;
3132
3133 return;
3134
3135fail:
Edward Cree1e0b8122013-05-31 18:36:12 +01003136 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
3137 outbuf, outlen, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01003138}
3139
3140static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
3141{
3142 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
3143}
3144
3145/* This creates an entry in the RX descriptor queue */
3146static inline void
3147efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
3148{
3149 struct efx_rx_buffer *rx_buf;
3150 efx_qword_t *rxd;
3151
3152 rxd = efx_rx_desc(rx_queue, index);
3153 rx_buf = efx_rx_buffer(rx_queue, index);
3154 EFX_POPULATE_QWORD_2(*rxd,
3155 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
3156 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
3157}
3158
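/* Notify the NIC of newly added RX descriptors.  RX_DESC_WPTR must be
 * a multiple of 8, so only whole groups of 8 descriptors are pushed:
 * e.g. with added_count == 21, write_count becomes 16 and the last 5
 * descriptors wait for a later call.
 */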
3159static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
3160{
3161 struct efx_nic *efx = rx_queue->efx;
3162 unsigned int write_count;
3163 efx_dword_t reg;
3164
3165 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
3166 write_count = rx_queue->added_count & ~7;
3167 if (rx_queue->notified_count == write_count)
3168 return;
3169
3170 do
3171 efx_ef10_build_rx_desc(
3172 rx_queue,
3173 rx_queue->notified_count & rx_queue->ptr_mask);
3174 while (++rx_queue->notified_count != write_count);
3175
3176 wmb();
3177 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
3178 write_count & rx_queue->ptr_mask);
3179 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
3180 efx_rx_queue_index(rx_queue));
3181}
3182
3183static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
3184
3185static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
3186{
3187 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3188 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3189 efx_qword_t event;
3190
3191 EFX_POPULATE_QWORD_2(event,
3192 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3193 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
3194
3195 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3196
3197 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3198 * already swapped the data to little-endian order.
3199 */
3200 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3201 sizeof(efx_qword_t));
3202
3203 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
3204 inbuf, sizeof(inbuf), 0,
3205 efx_ef10_rx_defer_refill_complete, 0);
3206}
3207
3208static void
3209efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
3210 int rc, efx_dword_t *outbuf,
3211 size_t outlen_actual)
3212{
3213 /* nothing to do */
3214}
3215
3216static int efx_ef10_ev_probe(struct efx_channel *channel)
3217{
3218 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
3219 (channel->eventq_mask + 1) *
3220 sizeof(efx_qword_t),
3221 GFP_KERNEL);
3222}
3223
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003224static void efx_ef10_ev_fini(struct efx_channel *channel)
3225{
3226 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
3227 MCDI_DECLARE_BUF_ERR(outbuf);
3228 struct efx_nic *efx = channel->efx;
3229 size_t outlen;
3230 int rc;
3231
3232 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
3233
3234 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
3235 outbuf, sizeof(outbuf), &outlen);
3236
3237 if (rc && rc != -EALREADY)
3238 goto fail;
3239
3240 return;
3241
3242fail:
3243 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
3244 outbuf, outlen, rc);
3245}
3246
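/* Initialise an event queue.  The ring is pre-filled with all-ones,
 * which the hardware treats as empty events.  Firmware that supports
 * INIT_EVQ_V2 takes generic flags and reports the options it actually
 * chose; older firmware gets explicit cut-through/merge flags.  On
 * channel 0 we additionally probe for, and try to enable, workaround
 * 26807.
 */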
Ben Hutchings8127d662013-08-29 19:19:29 +01003247static int efx_ef10_ev_init(struct efx_channel *channel)
3248{
3249 MCDI_DECLARE_BUF(inbuf,
Bert Kenwarda9955602016-08-11 13:01:54 +01003250 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
3251 EFX_BUF_SIZE));
3252 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01003253 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
3254 struct efx_nic *efx = channel->efx;
3255 struct efx_ef10_nic_data *nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01003256 size_t inlen, outlen;
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003257 unsigned int enabled, implemented;
Ben Hutchings8127d662013-08-29 19:19:29 +01003258 dma_addr_t dma_addr;
3259 int rc;
3260 int i;
3261
3262 nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01003263
3264 /* Fill event queue with all ones (i.e. empty events) */
3265 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
3266
3267 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
3268 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
3269 /* INIT_EVQ expects index in vector table, not absolute */
3270 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
Ben Hutchings8127d662013-08-29 19:19:29 +01003271 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
3272 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
3273 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
3274 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
3275 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
3276 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
3277 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
3278
Bert Kenwarda9955602016-08-11 13:01:54 +01003279 if (nic_data->datapath_caps2 &
3280 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
3281 /* Use the new generic approach to specifying event queue
3282 * configuration, requesting lower latency or higher throughput.
3283 * The options that actually get used appear in the output.
3284 */
3285 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
3286 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
3287 INIT_EVQ_V2_IN_FLAG_TYPE,
3288 MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
3289 } else {
3290 bool cut_thru = !(nic_data->datapath_caps &
3291 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
3292
3293 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
3294 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
3295 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
3296 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
3297 INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
3298 }
3299
Ben Hutchings8127d662013-08-29 19:19:29 +01003300 dma_addr = channel->eventq.buf.dma_addr;
3301 for (i = 0; i < entries; ++i) {
3302 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
3303 dma_addr += EFX_BUF_SIZE;
3304 }
3305
3306 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
3307
3308 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
3309 outbuf, sizeof(outbuf), &outlen);
Bert Kenwarda9955602016-08-11 13:01:54 +01003310
3311 if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
3312 netif_dbg(efx, drv, efx->net_dev,
3313 "Channel %d using event queue flags %08x\n",
3314 channel->channel,
3315 MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
3316
Ben Hutchings8127d662013-08-29 19:19:29 +01003317 /* IRQ return is ignored */
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003318 if (channel->channel || rc)
3319 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01003320
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003321 /* Successfully created event queue on channel 0 */
3322 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
Edward Cree832dc9e2015-07-21 15:09:31 +01003323 if (rc == -ENOSYS) {
Bert Kenwardd95e3292016-08-11 13:02:36 +01003324		/* GET_WORKAROUNDS predates workaround 26807, so if the call
3325		 * itself is missing this firmware cannot have the workaround.
Edward Cree832dc9e2015-07-21 15:09:31 +01003326 */
3327 nic_data->workaround_26807 = false;
3328 rc = 0;
3329 } else if (rc) {
Ben Hutchings8127d662013-08-29 19:19:29 +01003330 goto fail;
Edward Cree832dc9e2015-07-21 15:09:31 +01003331 } else {
3332 nic_data->workaround_26807 =
3333 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
Ben Hutchings8127d662013-08-29 19:19:29 +01003334
Edward Cree832dc9e2015-07-21 15:09:31 +01003335 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
3336 !nic_data->workaround_26807) {
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003337 unsigned int flags;
3338
Daniel Pieczko34ccfe62015-07-21 15:09:43 +01003339 rc = efx_mcdi_set_workaround(efx,
3340 MC_CMD_WORKAROUND_BUG26807,
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003341 true, &flags);
3342
3343 if (!rc) {
3344 if (flags &
3345 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
3346 netif_info(efx, drv, efx->net_dev,
3347 "other functions on NIC have been reset\n");
Daniel Pieczkoabd86a52015-12-04 08:48:39 +00003348
3349 /* With MCFW v4.6.x and earlier, the
3350 * boot count will have incremented,
3351 * so re-read the warm_boot_count
3352 * value now to ensure this function
3353 * doesn't think it has changed next
3354 * time it checks.
3355 */
3356 rc = efx_ef10_get_warm_boot_count(efx);
3357 if (rc >= 0) {
3358 nic_data->warm_boot_count = rc;
3359 rc = 0;
3360 }
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003361 }
Edward Cree832dc9e2015-07-21 15:09:31 +01003362 nic_data->workaround_26807 = true;
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003363 } else if (rc == -EPERM) {
Edward Cree832dc9e2015-07-21 15:09:31 +01003364 rc = 0;
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003365 }
Edward Cree832dc9e2015-07-21 15:09:31 +01003366 }
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003367 }
3368
3369 if (!rc)
3370 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01003371
3372fail:
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003373 efx_ef10_ev_fini(channel);
3374 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01003375}
3376
3377static void efx_ef10_ev_remove(struct efx_channel *channel)
3378{
3379 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
3380}
3381
3382static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
3383 unsigned int rx_queue_label)
3384{
3385 struct efx_nic *efx = rx_queue->efx;
3386
3387 netif_info(efx, hw, efx->net_dev,
3388 "rx event arrived on queue %d labeled as queue %u\n",
3389 efx_rx_queue_index(rx_queue), rx_queue_label);
3390
3391 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3392}
3393
3394static void
3395efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
3396 unsigned int actual, unsigned int expected)
3397{
3398 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
3399 struct efx_nic *efx = rx_queue->efx;
3400
3401 netif_info(efx, hw, efx->net_dev,
3402 "dropped %d events (index=%d expected=%d)\n",
3403 dropped, actual, expected);
3404
3405 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3406}
3407
3408/* A partially received scattered packet was aborted; clean up. */
3409static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
3410{
3411 unsigned int rx_desc_ptr;
3412
Ben Hutchings8127d662013-08-29 19:19:29 +01003413 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
3414 "scattered RX aborted (dropping %u buffers)\n",
3415 rx_queue->scatter_n);
3416
3417 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
3418
3419 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
3420 0, EFX_RX_PKT_DISCARD);
3421
3422 rx_queue->removed_count += rx_queue->scatter_n;
3423 rx_queue->scatter_n = 0;
3424 rx_queue->scatter_len = 0;
3425 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
3426}
3427
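/* Classify the error bits of an RX event and return the packet flags
 * to use.  CRC errors discard the packet unless RXALL is set; checksum
 * errors just bump the relevant (outer or inner) counter and deliver
 * the packet with no checksum-valid flag.
 */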
Jon Coopera0ee3542017-02-08 16:50:40 +00003428static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
3429 unsigned int n_packets,
3430 unsigned int rx_encap_hdr,
3431 unsigned int rx_l3_class,
3432 unsigned int rx_l4_class,
3433 const efx_qword_t *event)
3434{
3435 struct efx_nic *efx = channel->efx;
Edward Cree69787292017-10-31 14:29:47 +00003436 bool handled = false;
Jon Coopera0ee3542017-02-08 16:50:40 +00003437
3438 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
Edward Cree69787292017-10-31 14:29:47 +00003439 if (!(efx->net_dev->features & NETIF_F_RXALL)) {
3440 if (!efx->loopback_selftest)
3441 channel->n_rx_eth_crc_err += n_packets;
3442 return EFX_RX_PKT_DISCARD;
3443 }
3444 handled = true;
Jon Coopera0ee3542017-02-08 16:50:40 +00003445 }
3446 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
3447 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3448 rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3449 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3450 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3451 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3452 netdev_WARN(efx->net_dev,
3453 "invalid class for RX_IPCKSUM_ERR: event="
3454 EFX_QWORD_FMT "\n",
3455 EFX_QWORD_VAL(*event));
3456 if (!efx->loopback_selftest)
3457 *(rx_encap_hdr ?
3458 &channel->n_rx_outer_ip_hdr_chksum_err :
3459 &channel->n_rx_ip_hdr_chksum_err) += n_packets;
3460 return 0;
3461 }
3462 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
3463 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3464 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3465 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003466 (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3467 rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
Jon Coopera0ee3542017-02-08 16:50:40 +00003468 netdev_WARN(efx->net_dev,
3469 "invalid class for RX_TCPUDP_CKSUM_ERR: event="
3470 EFX_QWORD_FMT "\n",
3471 EFX_QWORD_VAL(*event));
3472 if (!efx->loopback_selftest)
3473 *(rx_encap_hdr ?
3474 &channel->n_rx_outer_tcp_udp_chksum_err :
3475 &channel->n_rx_tcp_udp_chksum_err) += n_packets;
3476 return 0;
3477 }
3478 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
3479 if (unlikely(!rx_encap_hdr))
3480 netdev_WARN(efx->net_dev,
3481 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
3482 EFX_QWORD_FMT "\n",
3483 EFX_QWORD_VAL(*event));
3484 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3485 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3486 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3487 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3488 netdev_WARN(efx->net_dev,
3489 "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
3490 EFX_QWORD_FMT "\n",
3491 EFX_QWORD_VAL(*event));
3492 if (!efx->loopback_selftest)
3493 channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
3494 return 0;
3495 }
3496 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
3497 if (unlikely(!rx_encap_hdr))
3498 netdev_WARN(efx->net_dev,
3499 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3500 EFX_QWORD_FMT "\n",
3501 EFX_QWORD_VAL(*event));
3502 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3503 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003504 (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3505 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
Jon Coopera0ee3542017-02-08 16:50:40 +00003506 netdev_WARN(efx->net_dev,
3507 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3508 EFX_QWORD_FMT "\n",
3509 EFX_QWORD_VAL(*event));
3510 if (!efx->loopback_selftest)
3511 channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
3512 return 0;
3513 }
3514
Edward Cree69787292017-10-31 14:29:47 +00003515 WARN_ON(!handled); /* No error bits were recognised */
Jon Coopera0ee3542017-02-08 16:50:40 +00003516 return 0;
3517}
3518
Ben Hutchings8127d662013-08-29 19:19:29 +01003519static int efx_ef10_handle_rx_event(struct efx_channel *channel,
3520 const efx_qword_t *event)
3521{
Jon Coopera0ee3542017-02-08 16:50:40 +00003522 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
3523 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
Ben Hutchings8127d662013-08-29 19:19:29 +01003524 unsigned int n_descs, n_packets, i;
3525 struct efx_nic *efx = channel->efx;
Jon Coopera0ee3542017-02-08 16:50:40 +00003526 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01003527 struct efx_rx_queue *rx_queue;
Jon Coopera0ee3542017-02-08 16:50:40 +00003528 efx_qword_t errors;
Ben Hutchings8127d662013-08-29 19:19:29 +01003529 bool rx_cont;
3530 u16 flags = 0;
3531
Mark Rutland6aa7de02017-10-23 14:07:29 -07003532 if (unlikely(READ_ONCE(efx->reset_pending)))
Ben Hutchings8127d662013-08-29 19:19:29 +01003533 return 0;
3534
3535 /* Basic packet information */
3536 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
3537 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
3538 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
Jon Coopera0ee3542017-02-08 16:50:40 +00003539 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003540 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
Ben Hutchings8127d662013-08-29 19:19:29 +01003541 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
Jon Coopera0ee3542017-02-08 16:50:40 +00003542 rx_encap_hdr =
3543 nic_data->datapath_caps &
3544 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
3545 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
3546 ESE_EZ_ENCAP_HDR_NONE;
Ben Hutchings8127d662013-08-29 19:19:29 +01003547
Ben Hutchings48ce5632013-11-01 16:42:44 +00003548 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
3549 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
3550 EFX_QWORD_FMT "\n",
3551 EFX_QWORD_VAL(*event));
Ben Hutchings8127d662013-08-29 19:19:29 +01003552
3553 rx_queue = efx_channel_get_rx_queue(channel);
3554
3555 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
3556 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
3557
3558 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
3559 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3560
3561 if (n_descs != rx_queue->scatter_n + 1) {
Ben Hutchings8127d662013-08-29 19:19:29 +01003564 /* detect rx abort */
3565 if (unlikely(n_descs == rx_queue->scatter_n)) {
Ben Hutchings48ce5632013-11-01 16:42:44 +00003566 if (rx_queue->scatter_n == 0 || rx_bytes != 0)
3567 netdev_WARN(efx->net_dev,
3568 "invalid RX abort: scatter_n=%u event="
3569 EFX_QWORD_FMT "\n",
3570 rx_queue->scatter_n,
3571 EFX_QWORD_VAL(*event));
Ben Hutchings8127d662013-08-29 19:19:29 +01003572 efx_ef10_handle_rx_abort(rx_queue);
3573 return 0;
3574 }
3575
Ben Hutchings92a04162013-09-24 23:21:57 +01003576 /* Check that RX completion merging is valid, i.e.
3577 * the current firmware supports it and this is a
3578 * non-scattered packet.
3579 */
3580 if (!(nic_data->datapath_caps &
3581 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
3582 rx_queue->scatter_n != 0 || rx_cont) {
Ben Hutchings8127d662013-08-29 19:19:29 +01003583 efx_ef10_handle_rx_bad_lbits(
3584 rx_queue, next_ptr_lbits,
3585 (rx_queue->removed_count +
3586 rx_queue->scatter_n + 1) &
3587 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3588 return 0;
3589 }
3590
3591 /* Merged completion for multiple non-scattered packets */
3592 rx_queue->scatter_n = 1;
3593 rx_queue->scatter_len = 0;
3594 n_packets = n_descs;
3595 ++channel->n_rx_merge_events;
3596 channel->n_rx_merge_packets += n_packets;
3597 flags |= EFX_RX_PKT_PREFIX_LEN;
3598 } else {
3599 ++rx_queue->scatter_n;
3600 rx_queue->scatter_len += rx_bytes;
3601 if (rx_cont)
3602 return 0;
3603 n_packets = 1;
3604 }
3605
Jon Coopera0ee3542017-02-08 16:50:40 +00003606 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
3607 ESF_DZ_RX_IPCKSUM_ERR, 1,
3608 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
3609 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
3610 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
3611 EFX_AND_QWORD(errors, *event, errors);
3612 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
3613 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
Edward Cree90d2ea92017-02-10 17:34:59 +00003614 rx_encap_hdr,
Jon Coopera0ee3542017-02-08 16:50:40 +00003615 rx_l3_class, rx_l4_class,
Edward Cree90d2ea92017-02-10 17:34:59 +00003616 event);
Jon Coopera0ee3542017-02-08 16:50:40 +00003617 } else {
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003618 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
3619 rx_l4_class == ESE_FZ_L4_CLASS_UDP;
Jon Cooperda50ae22017-02-08 16:51:02 +00003620
3621 switch (rx_encap_hdr) {
3622 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
3623 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
3624 if (tcpudp)
3625 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
3626 break;
3627 case ESE_EZ_ENCAP_HDR_GRE:
3628 case ESE_EZ_ENCAP_HDR_NONE:
3629 if (tcpudp)
3630 flags |= EFX_RX_PKT_CSUMMED;
3631 break;
3632 default:
3633 netdev_WARN(efx->net_dev,
3634 "unknown encapsulation type: event="
3635 EFX_QWORD_FMT "\n",
3636 EFX_QWORD_VAL(*event));
3637 }
Ben Hutchings8127d662013-08-29 19:19:29 +01003638 }
3639
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003640 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
Ben Hutchings8127d662013-08-29 19:19:29 +01003641 flags |= EFX_RX_PKT_TCP;
3642
3643 channel->irq_mod_score += 2 * n_packets;
3644
3645 /* Handle received packet(s) */
3646 for (i = 0; i < n_packets; i++) {
3647 efx_rx_packet(rx_queue,
3648 rx_queue->removed_count & rx_queue->ptr_mask,
3649 rx_queue->scatter_n, rx_queue->scatter_len,
3650 flags);
3651 rx_queue->removed_count += rx_queue->scatter_n;
3652 }
3653
3654 rx_queue->scatter_n = 0;
3655 rx_queue->scatter_len = 0;
3656
3657 return n_packets;
3658}
3659
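/* Reassemble one 32-bit half of a TX timestamp from the two 16-bit
 * fields of a timestamp event: e.g. TSTAMP_DATA_HI == 0x1234 and
 * TSTAMP_DATA_LO == 0x5678 yield 0x12345678.
 */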
Martin Habetsb9b603d42018-01-25 17:24:43 +00003660static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
3661{
3662 u32 tstamp;
3663
3664 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
3665 tstamp <<= 16;
3666 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
3667
3668 return tstamp;
3669}
3670
Bert Kenward5227ecc2018-01-25 17:24:20 +00003671static void
Ben Hutchings8127d662013-08-29 19:19:29 +01003672efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
3673{
3674 struct efx_nic *efx = channel->efx;
3675 struct efx_tx_queue *tx_queue;
3676 unsigned int tx_ev_desc_ptr;
3677 unsigned int tx_ev_q_label;
Martin Habetsb9b603d42018-01-25 17:24:43 +00003678 unsigned int tx_ev_type;
3679 u64 ts_part;
Ben Hutchings8127d662013-08-29 19:19:29 +01003680
Mark Rutland6aa7de02017-10-23 14:07:29 -07003681 if (unlikely(READ_ONCE(efx->reset_pending)))
Bert Kenward5227ecc2018-01-25 17:24:20 +00003682 return;
Ben Hutchings8127d662013-08-29 19:19:29 +01003683
3684 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
Bert Kenward5227ecc2018-01-25 17:24:20 +00003685 return;
Ben Hutchings8127d662013-08-29 19:19:29 +01003686
Martin Habetsb9b603d42018-01-25 17:24:43 +00003687 /* Get the transmit queue */
Ben Hutchings8127d662013-08-29 19:19:29 +01003688 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
3689 tx_queue = efx_channel_get_tx_queue(channel,
3690 tx_ev_q_label % EFX_TXQ_TYPES);
Martin Habetsb9b603d42018-01-25 17:24:43 +00003691
3692 if (!tx_queue->timestamping) {
3693 /* Transmit completion */
3694 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
3695 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
3696 return;
3697 }
3698
3699 /* Transmit timestamps are only available for 8XXX series. They result
3700 * in three events per packet. These occur in order, and are:
3701 * - the normal completion event
3702 * - the low part of the timestamp
3703 * - the high part of the timestamp
3704 *
3705 * Each part of the timestamp is itself split across two 16 bit
3706 * fields in the event.
3707 */
3708 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
3709
3710 switch (tx_ev_type) {
3711 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
3712 /* In case of Queue flush or FLR, we might have received
3713 * the previous TX completion event but not the Timestamp
3714 * events.
3715 */
3716 if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
3717 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3718
3719 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
3720 ESF_DZ_TX_DESCR_INDX);
3721 tx_queue->completed_desc_ptr =
3722 tx_ev_desc_ptr & tx_queue->ptr_mask;
3723 break;
3724
3725 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
3726 ts_part = efx_ef10_extract_event_ts(event);
3727 tx_queue->completed_timestamp_minor = ts_part;
3728 break;
3729
3730 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
3731 ts_part = efx_ef10_extract_event_ts(event);
3732 tx_queue->completed_timestamp_major = ts_part;
3733
3734 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3735 tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
3736 break;
3737
3738 default:
3739 netif_err(efx, hw, efx->net_dev,
3740 "channel %d unknown tx event type %d (data "
3741 EFX_QWORD_FMT ")\n",
3742 channel->channel, tx_ev_type,
3743 EFX_QWORD_VAL(*event));
3744 break;
3745 }
Ben Hutchings8127d662013-08-29 19:19:29 +01003746}
3747
3748static void
3749efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
3750{
3751 struct efx_nic *efx = channel->efx;
3752 int subcode;
3753
3754 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
3755
3756 switch (subcode) {
3757 case ESE_DZ_DRV_TIMER_EV:
3758 case ESE_DZ_DRV_WAKE_UP_EV:
3759 break;
3760 case ESE_DZ_DRV_START_UP_EV:
3761 /* event queue init complete. ok. */
3762 break;
3763 default:
3764 netif_err(efx, hw, efx->net_dev,
3765 "channel %d unknown driver event type %d"
3766 " (data " EFX_QWORD_FMT ")\n",
3767 channel->channel, subcode,
3768 EFX_QWORD_VAL(*event));
3769
3770 }
3771}
3772
3773static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3774 efx_qword_t *event)
3775{
3776 struct efx_nic *efx = channel->efx;
3777 u32 subcode;
3778
3779 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3780
3781 switch (subcode) {
3782 case EFX_EF10_TEST:
3783 channel->event_test_cpu = raw_smp_processor_id();
3784 break;
3785 case EFX_EF10_REFILL:
3786		/* The queue must be empty, so no RX events will arrive,
3787		 * and so efx_process_channel() will not refill it.
3788		 * Refill it here.
3789		 */
Jon Coopercce28792013-10-02 11:04:14 +01003790 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
Ben Hutchings8127d662013-08-29 19:19:29 +01003791 break;
3792 default:
3793 netif_err(efx, hw, efx->net_dev,
3794 "channel %d unknown driver event type %u"
3795 " (data " EFX_QWORD_FMT ")\n",
3796 channel->channel, (unsigned) subcode,
3797 EFX_QWORD_VAL(*event));
3798 }
3799}
3800
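/* Service up to quota events from a channel's event queue.  Each entry
 * processed is overwritten with all-ones so that it reads back as
 * empty.  A merged RX completion may cover several packets, so spent
 * can reach the quota mid-event; it is clamped rather than split (see
 * the XXX below).
 */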
3801static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3802{
3803 struct efx_nic *efx = channel->efx;
3804 efx_qword_t event, *p_event;
3805 unsigned int read_ptr;
3806 int ev_code;
Ben Hutchings8127d662013-08-29 19:19:29 +01003807 int spent = 0;
3808
Eric W. Biederman75363a42014-03-14 18:11:22 -07003809 if (quota <= 0)
3810 return spent;
3811
Ben Hutchings8127d662013-08-29 19:19:29 +01003812 read_ptr = channel->eventq_read_ptr;
3813
3814 for (;;) {
3815 p_event = efx_event(channel, read_ptr);
3816 event = *p_event;
3817
3818 if (!efx_event_present(&event))
3819 break;
3820
3821 EFX_SET_QWORD(*p_event);
3822
3823 ++read_ptr;
3824
3825 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3826
3827 netif_vdbg(efx, drv, efx->net_dev,
3828 "processing event on %d " EFX_QWORD_FMT "\n",
3829 channel->channel, EFX_QWORD_VAL(event));
3830
3831 switch (ev_code) {
3832 case ESE_DZ_EV_CODE_MCDI_EV:
3833 efx_mcdi_process_event(channel, &event);
3834 break;
3835 case ESE_DZ_EV_CODE_RX_EV:
3836 spent += efx_ef10_handle_rx_event(channel, &event);
3837 if (spent >= quota) {
3838 /* XXX can we split a merged event to
3839 * avoid going over-quota?
3840 */
3841 spent = quota;
3842 goto out;
3843 }
3844 break;
3845 case ESE_DZ_EV_CODE_TX_EV:
Bert Kenward5227ecc2018-01-25 17:24:20 +00003846 efx_ef10_handle_tx_event(channel, &event);
Ben Hutchings8127d662013-08-29 19:19:29 +01003847 break;
3848 case ESE_DZ_EV_CODE_DRIVER_EV:
3849 efx_ef10_handle_driver_event(channel, &event);
3850 if (++spent == quota)
3851 goto out;
3852 break;
3853 case EFX_EF10_DRVGEN_EV:
3854 efx_ef10_handle_driver_generated_event(channel, &event);
3855 break;
3856 default:
3857 netif_err(efx, hw, efx->net_dev,
3858 "channel %d unknown event type %d"
3859 " (data " EFX_QWORD_FMT ")\n",
3860 channel->channel, ev_code,
3861 EFX_QWORD_VAL(event));
3862 }
3863 }
3864
3865out:
3866 channel->eventq_read_ptr = read_ptr;
3867 return spent;
3868}
3869
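/* Acknowledge processed events by writing the read pointer back to the
 * NIC.  With workaround 35388 the pointer does not fit in one write,
 * so it goes through ER_DD_EVQ_INDIRECT in two pieces: high bits
 * first, then the low ERF_DD_EVQ_IND_RPTR_WIDTH bits.
 */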
3870static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3871{
3872 struct efx_nic *efx = channel->efx;
3873 efx_dword_t rptr;
3874
3875 if (EFX_EF10_WORKAROUND_35388(efx)) {
3876 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3877 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3878 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3879 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3880
3881 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3882 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3883 ERF_DD_EVQ_IND_RPTR,
3884 (channel->eventq_read_ptr &
3885 channel->eventq_mask) >>
3886 ERF_DD_EVQ_IND_RPTR_WIDTH);
3887 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3888 channel->channel);
3889 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3890 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3891 ERF_DD_EVQ_IND_RPTR,
3892 channel->eventq_read_ptr &
3893 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3894 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3895 channel->channel);
3896 } else {
3897 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3898 channel->eventq_read_ptr &
3899 channel->eventq_mask);
3900 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3901 }
3902}
3903
3904static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3905{
3906 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3907 struct efx_nic *efx = channel->efx;
3908 efx_qword_t event;
3909 int rc;
3910
3911 EFX_POPULATE_QWORD_2(event,
3912 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3913 ESF_DZ_EV_DATA, EFX_EF10_TEST);
3914
3915 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3916
3917 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3918 * already swapped the data to little-endian order.
3919 */
3920 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3921 sizeof(efx_qword_t));
3922
3923 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3924 NULL, 0, NULL);
3925 if (rc != 0)
3926 goto fail;
3927
3928 return;
3929
3930fail:
3931 WARN_ON(true);
3932 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3933}
3934
3935void efx_ef10_handle_drain_event(struct efx_nic *efx)
3936{
3937 if (atomic_dec_and_test(&efx->active_queues))
3938 wake_up(&efx->flush_wq);
3939
3940 WARN_ON(atomic_read(&efx->active_queues) < 0);
3941}
3942
3943static int efx_ef10_fini_dmaq(struct efx_nic *efx)
3944{
3945 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3946 struct efx_channel *channel;
3947 struct efx_tx_queue *tx_queue;
3948 struct efx_rx_queue *rx_queue;
3949 int pending;
3950
3951 /* If the MC has just rebooted, the TX/RX queues will have already been
3952 * torn down, but efx->active_queues needs to be set to zero.
3953 */
3954 if (nic_data->must_realloc_vis) {
3955 atomic_set(&efx->active_queues, 0);
3956 return 0;
3957 }
3958
3959 /* Do not attempt to write to the NIC during EEH recovery */
3960 if (efx->state != STATE_RECOVERY) {
3961 efx_for_each_channel(channel, efx) {
3962 efx_for_each_channel_rx_queue(rx_queue, channel)
3963 efx_ef10_rx_fini(rx_queue);
3964 efx_for_each_channel_tx_queue(tx_queue, channel)
3965 efx_ef10_tx_fini(tx_queue);
3966 }
3967
3968 wait_event_timeout(efx->flush_wq,
3969 atomic_read(&efx->active_queues) == 0,
3970 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
3971 pending = atomic_read(&efx->active_queues);
3972 if (pending) {
3973 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
3974 pending);
3975 return -ETIMEDOUT;
3976 }
3977 }
3978
3979 return 0;
3980}
3981
Edward Creee2835462014-04-16 19:27:48 +01003982static void efx_ef10_prepare_flr(struct efx_nic *efx)
3983{
3984 atomic_set(&efx->active_queues, 0);
3985}
3986
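/* Filter specs are compared on their match flags, RX/TX flags, and the
 * tail of the structure from outer_vid onwards, which holds all the
 * match values.  efx_ef10_filter_hash() below hashes that same tail,
 * so specs that compare equal always hash equal.
 */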
Ben Hutchings8127d662013-08-29 19:19:29 +01003987static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
3988 const struct efx_filter_spec *right)
3989{
3990 if ((left->match_flags ^ right->match_flags) |
3991 ((left->flags ^ right->flags) &
3992 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3993 return false;
3994
3995 return memcmp(&left->outer_vid, &right->outer_vid,
3996 sizeof(struct efx_filter_spec) -
3997 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3998}
3999
4000static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
4001{
4002 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
4003 return jhash2((const u32 *)&spec->outer_vid,
4004 (sizeof(struct efx_filter_spec) -
4005 offsetof(struct efx_filter_spec, outer_vid)) / 4,
4006 0);
4007 /* XXX should we randomise the initval? */
4008}
4009
4010/* Decide whether a filter should be exclusive or should allow
4011 * delivery to additional recipients.  Currently, filters for a
4012 * specific local unicast MAC address or IP address are
4013 * exclusive.
4014 */
4015static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
4016{
4017 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
4018 !is_multicast_ether_addr(spec->loc_mac))
4019 return true;
4020
4021 if ((spec->match_flags &
4022 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
4023 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
4024 if (spec->ether_type == htons(ETH_P_IP) &&
4025 !ipv4_is_multicast(spec->loc_host[0]))
4026 return true;
4027 if (spec->ether_type == htons(ETH_P_IPV6) &&
4028 ((const u8 *)spec->loc_host)[0] != 0xff)
4029 return true;
4030 }
4031
4032 return false;
4033}
4034
4035static struct efx_filter_spec *
4036efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
4037 unsigned int filter_idx)
4038{
4039 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
4040 ~EFX_EF10_FILTER_FLAGS);
4041}
4042
4043static unsigned int
4044efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
4045 unsigned int filter_idx)
4046{
4047 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
4048}
4049
4050static void
4051efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
4052 unsigned int filter_idx,
4053 const struct efx_filter_spec *spec,
4054 unsigned int flags)
4055{
4056 table->entry[filter_idx].spec = (unsigned long)spec | flags;
4057}
4058
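/* Build the match-field portion of a FILTER_OP request from a filter
 * spec.  Unlike most of MCDI these fields are big-endian.  Encap
 * filters are handled first; they always match as unknown unicast or
 * multicast in the inner frame, via the IFRM_ match bits.
 */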
Edward Cree9b410802017-01-27 15:02:52 +00004059static void
4060efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
4061 const struct efx_filter_spec *spec,
4062 efx_dword_t *inbuf)
4063{
4064 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4065 u32 match_fields = 0, uc_match, mc_match;
4066
4067 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4068 efx_ef10_filter_is_exclusive(spec) ?
4069 MC_CMD_FILTER_OP_IN_OP_INSERT :
4070 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
4071
4072 /* Convert match flags and values. Unlike almost
4073 * everything else in MCDI, these fields are in
4074 * network byte order.
4075 */
4076#define COPY_VALUE(value, mcdi_field) \
4077 do { \
4078 match_fields |= \
4079 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
4080 mcdi_field ## _LBN; \
4081 BUILD_BUG_ON( \
4082 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
4083 sizeof(value)); \
4084 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
4085 &value, sizeof(value)); \
4086 } while (0)
4087#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
4088 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
4089 COPY_VALUE(spec->gen_field, mcdi_field); \
4090 }
4091 /* Handle encap filters first. They will always be mismatch
4092 * (unknown UC or MC) filters
4093 */
4094 if (encap_type) {
4095 /* ether_type and outer_ip_proto need to be variables
4096 * because COPY_VALUE wants to memcpy them
4097 */
4098 __be16 ether_type =
4099 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
4100 ETH_P_IPV6 : ETH_P_IP);
4101 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
4102 u8 outer_ip_proto;
4103
4104 switch (encap_type & EFX_ENCAP_TYPES_MASK) {
4105 case EFX_ENCAP_TYPE_VXLAN:
4106 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
4107 /* fallthrough */
4108 case EFX_ENCAP_TYPE_GENEVE:
4109 COPY_VALUE(ether_type, ETHER_TYPE);
4110 outer_ip_proto = IPPROTO_UDP;
4111 COPY_VALUE(outer_ip_proto, IP_PROTO);
4112 /* We always need to set the type field, even
4113 * though we're not matching on the TNI.
4114 */
4115 MCDI_POPULATE_DWORD_1(inbuf,
4116 FILTER_OP_EXT_IN_VNI_OR_VSID,
4117 FILTER_OP_EXT_IN_VNI_TYPE,
4118 vni_type);
4119 break;
4120 case EFX_ENCAP_TYPE_NVGRE:
4121 COPY_VALUE(ether_type, ETHER_TYPE);
4122 outer_ip_proto = IPPROTO_GRE;
4123 COPY_VALUE(outer_ip_proto, IP_PROTO);
4124 break;
4125 default:
4126 WARN_ON(1);
4127 }
4128
4129 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4130 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4131 } else {
4132 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4133 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4134 }
4135
4136 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
4137 match_fields |=
4138 is_multicast_ether_addr(spec->loc_mac) ?
4139 1 << mc_match :
4140 1 << uc_match;
4141 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
4142 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
4143 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
4144 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
4145 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
4146 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
4147 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
4148 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
4149 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
4150 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
4151#undef COPY_FIELD
4152#undef COPY_VALUE
4153 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
4154 match_fields);
4155}
4156
Ben Hutchings8127d662013-08-29 19:19:29 +01004157static void efx_ef10_filter_push_prep(struct efx_nic *efx,
4158 const struct efx_filter_spec *spec,
4159 efx_dword_t *inbuf, u64 handle,
Edward Cree42356d92018-03-08 15:45:17 +00004160 struct efx_rss_context *ctx,
Ben Hutchings8127d662013-08-29 19:19:29 +01004161 bool replacing)
4162{
4163 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooperdcb41232016-04-25 16:51:00 +01004164 u32 flags = spec->flags;
Ben Hutchings8127d662013-08-29 19:19:29 +01004165
Edward Cree9b410802017-01-27 15:02:52 +00004166 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01004167
Edward Cree42356d92018-03-08 15:45:17 +00004168 /* If RSS filter, caller better have given us an RSS context */
4169 if (flags & EFX_FILTER_FLAG_RX_RSS) {
4170 /* We don't have the ability to return an error, so we'll just
4171 * log a warning and disable RSS for the filter.
4172 */
4173 if (WARN_ON_ONCE(!ctx))
4174 flags &= ~EFX_FILTER_FLAG_RX_RSS;
4175 else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
4176 flags &= ~EFX_FILTER_FLAG_RX_RSS;
4177 }
Jon Cooperdcb41232016-04-25 16:51:00 +01004178
Ben Hutchings8127d662013-08-29 19:19:29 +01004179 if (replacing) {
4180 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4181 MC_CMD_FILTER_OP_IN_OP_REPLACE);
4182 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
4183 } else {
Edward Cree9b410802017-01-27 15:02:52 +00004184 efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01004185 }
4186
Daniel Pieczko45b24492015-05-06 00:57:14 +01004187 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004188 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
4189 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4190 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
4191 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
Shradha Shahe3d36292015-05-06 00:56:24 +01004192 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01004193 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
4194 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
Ben Hutchingsa0bc3482013-12-16 18:56:24 +00004195 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
4196 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4197 0 : spec->dmaq_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004198 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
Jon Cooperdcb41232016-04-25 16:51:00 +01004199 (flags & EFX_FILTER_FLAG_RX_RSS) ?
Ben Hutchings8127d662013-08-29 19:19:29 +01004200 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
4201 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
Jon Cooperdcb41232016-04-25 16:51:00 +01004202 if (flags & EFX_FILTER_FLAG_RX_RSS)
Edward Cree42356d92018-03-08 15:45:17 +00004203 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004204}

static int efx_ef10_filter_push(struct efx_nic *efx,
				const struct efx_filter_spec *spec, u64 *handle,
				struct efx_rss_context *ctx, bool replacing)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
	int rc;

	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc == 0)
		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
	if (rc == -ENOSPC)
		rc = -EBUSY; /* to match efx_farch_filter_insert() */
	return rc;
}

static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
{
	enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
	unsigned int match_flags = spec->match_flags;
	unsigned int uc_match, mc_match;
	u32 mcdi_flags = 0;

#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) {		\
		unsigned int old_match_flags = match_flags;		\
		match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag;		\
		if (match_flags != old_match_flags)			\
			mcdi_flags |=					\
				(1 << ((encap) ?			\
				       MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
				       mcdi_field ## _LBN :		\
				       MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
				       mcdi_field ## _LBN));		\
	}
	/* inner or outer based on encap type */
	MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
	MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
	MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
	MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
	MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
	MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
	MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
	MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
	/* always outer */
	MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
	MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
#undef MAP_FILTER_TO_MCDI_FLAG

	/* special handling for encap type, and mismatch */
	if (encap_type) {
		match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
		mcdi_flags |=
			(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
		mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);

		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
	} else {
		uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
		mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
	}

	if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
		match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
		mcdi_flags |=
			is_multicast_ether_addr(spec->loc_mac) ?
			1 << mc_match :
			1 << uc_match;
	}

	/* Did we map them all? */
	WARN_ON_ONCE(match_flags);

	return mcdi_flags;
}
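
/* Worked example (illustrative): a non-encapsulated spec with
 * EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID maps to
 * (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN) |
 * (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN).
 * With an encap type set, the host/MAC/port matches would instead use the
 * IFRM_ (inner frame) field positions, while the VLAN bits stay outer.
 */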

static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
			       const struct efx_filter_spec *spec)
{
	u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
	unsigned int match_pri;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++)
		if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
			return match_pri;

	return -EPROTONOSUPPORT;
}

static s32 efx_ef10_filter_insert(struct efx_nic *efx,
				  struct efx_filter_spec *spec,
				  bool replace_equal)
{
	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
	struct efx_ef10_filter_table *table;
	struct efx_filter_spec *saved_spec;
	struct efx_rss_context *ctx = NULL;
	unsigned int match_pri, hash;
	unsigned int priv_flags;
	bool replacing = false;
	unsigned int depth, i;
	int ins_index = -1;
	DEFINE_WAIT(wait);
	bool is_mc_recip;
	s32 rc;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);

	/* For now, only support RX filters */
	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
	    EFX_FILTER_FLAG_RX) {
		rc = -EINVAL;
		goto out_unlock;
	}

	rc = efx_ef10_filter_pri(table, spec);
	if (rc < 0)
		goto out_unlock;
	match_pri = rc;

	hash = efx_ef10_filter_hash(spec);
	is_mc_recip = efx_filter_is_mc_recipient(spec);
	if (is_mc_recip)
		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);

	if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
		if (spec->rss_context)
			ctx = efx_find_rss_context_entry(spec->rss_context,
							 &efx->rss_context.list);
		else
			ctx = &efx->rss_context;
		if (!ctx) {
			rc = -ENOENT;
			goto out_unlock;
		}
		if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
			rc = -EOPNOTSUPP;
			goto out_unlock;
		}
	}

	/* Find any existing filters with the same match tuple or
	 * else a free slot to insert at.
	 */
	for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
		saved_spec = efx_ef10_filter_entry_spec(table, i);

		if (!saved_spec) {
			if (ins_index < 0)
				ins_index = i;
		} else if (efx_ef10_filter_equal(spec, saved_spec)) {
			if (spec->priority < saved_spec->priority &&
			    spec->priority != EFX_FILTER_PRI_AUTO) {
				rc = -EPERM;
				goto out_unlock;
			}
			if (!is_mc_recip) {
				/* This is the only one */
				if (spec->priority ==
				    saved_spec->priority &&
				    !replace_equal) {
					rc = -EEXIST;
					goto out_unlock;
				}
				ins_index = i;
				break;
			} else if (spec->priority >
				   saved_spec->priority ||
				   (spec->priority ==
				    saved_spec->priority &&
				    replace_equal)) {
				if (ins_index < 0)
					ins_index = i;
				else
					__set_bit(depth, mc_rem_map);
			}
		}
	}

	/* Once we reach the maximum search depth, use the first suitable
	 * slot, or return -EBUSY if there was none
	 */
	if (ins_index < 0) {
		rc = -EBUSY;
		goto out_unlock;
	}

	/* Create a software table entry if necessary. */
	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
	if (saved_spec) {
		if (spec->priority == EFX_FILTER_PRI_AUTO &&
		    saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
			/* Just make sure it won't be removed */
			if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
				saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
			table->entry[ins_index].spec &=
				~EFX_EF10_FILTER_FLAG_AUTO_OLD;
			rc = ins_index;
			goto out_unlock;
		}
		replacing = true;
		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
	} else {
		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
		if (!saved_spec) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		*saved_spec = *spec;
		priv_flags = 0;
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);

	/* Actually insert the filter on the HW */
	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
				  ctx, replacing);

	/* Finalise the software table entry */
	if (rc == 0) {
		if (replacing) {
			/* Update the fields that may differ */
			if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
				saved_spec->flags |=
					EFX_FILTER_FLAG_RX_OVER_AUTO;
			saved_spec->priority = spec->priority;
			saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
			saved_spec->flags |= spec->flags;
			saved_spec->rss_context = spec->rss_context;
			saved_spec->dmaq_id = spec->dmaq_id;
		}
	} else if (!replacing) {
		kfree(saved_spec);
		saved_spec = NULL;
	} else {
		/* We failed to replace, so the old filter is still present.
		 * Roll back the software table to reflect this.  In fact the
		 * efx_ef10_filter_set_entry() call below will do the right
		 * thing, so nothing extra is needed here.
		 */
	}
	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);

	/* Remove and finalise entries for lower-priority multicast
	 * recipients
	 */
	if (is_mc_recip) {
		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
		unsigned int depth, i;

		memset(inbuf, 0, sizeof(inbuf));

		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
			if (!test_bit(depth, mc_rem_map))
				continue;

			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
			saved_spec = efx_ef10_filter_entry_spec(table, i);
			priv_flags = efx_ef10_filter_entry_flags(table, i);

			if (rc == 0) {
				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
					       table->entry[i].handle);
				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
						  inbuf, sizeof(inbuf),
						  NULL, 0, NULL);
			}

			if (rc == 0) {
				kfree(saved_spec);
				saved_spec = NULL;
				priv_flags = 0;
			}
			efx_ef10_filter_set_entry(table, i, saved_spec,
						  priv_flags);
		}
	}

	/* If successful, return the inserted filter ID */
	if (rc == 0)
		rc = efx_ef10_make_filter_id(match_pri, ins_index);

out_unlock:
	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return rc;
}
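
/* Illustrative walk-through of the open-addressed probe above (numbers are
 * made up): with hash = 0x1f00 and HUNT_FILTER_TBL_ROWS = 8192, depths 1 ..
 * EFX_EF10_FILTER_SEARCH_LIMIT - 1 visit rows 0x1f01, 0x1f02, ... in turn.
 * The first empty row becomes the provisional ins_index; an exact-match row
 * ends the search for exclusive (non-multicast) filters, while for multicast
 * recipients every matching lower-priority row has its probe depth recorded
 * in mc_rem_map so it can be unsubscribed once the new filter is in place.
 */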

static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
	/* no need to do anything here on EF10 */
}

/* Remove a filter.
 * If !by_index, remove by ID
 * If by_index, remove by index
 * Filter ID may come from userland and must be range-checked.
 * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
 * for write.
 */
static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
					   unsigned int priority_mask,
					   u32 filter_id, bool by_index)
{
	unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
	struct efx_filter_spec *spec;
	DEFINE_WAIT(wait);
	int rc;

	spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (!spec ||
	    (!by_index &&
	     efx_ef10_filter_pri(table, spec) !=
	     efx_ef10_filter_get_unsafe_pri(filter_id)))
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
	    priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
		/* Just remove flags */
		spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
		table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
		return 0;
	}

	if (!(priority_mask & (1U << spec->priority)))
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
		/* Reset to an automatic filter */

		struct efx_filter_spec new_spec = *spec;

		new_spec.priority = EFX_FILTER_PRI_AUTO;
		new_spec.flags = (EFX_FILTER_FLAG_RX |
				  (efx_rss_active(&efx->rss_context) ?
				   EFX_FILTER_FLAG_RX_RSS : 0));
		new_spec.dmaq_id = 0;
		new_spec.rss_context = 0;
		rc = efx_ef10_filter_push(efx, &new_spec,
					  &table->entry[filter_idx].handle,
					  &efx->rss_context,
					  true);

		if (rc == 0)
			*spec = new_spec;
	} else {
		/* Really remove the filter */

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
					inbuf, sizeof(inbuf), NULL, 0, NULL);

		if ((rc == 0) || (rc == -ENOENT)) {
			/* Filter removed OK or didn't actually exist */
			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		} else {
			efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
					       MC_CMD_FILTER_OP_EXT_IN_LEN,
					       NULL, 0, rc);
		}
	}

	return rc;
}
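
/* Note on filter IDs (illustrative): efx_ef10_make_filter_id(), which lives
 * elsewhere in this file, packs the match priority and the table index into
 * one user-visible ID, and efx_ef10_filter_get_unsafe_id()/_pri() undo that
 * packing.  Conceptually something like
 *
 *	filter_id = match_pri * ID_STRIDE + filter_idx;
 *
 * where ID_STRIDE >= HUNT_FILTER_TBL_ROWS (the exact stride is whatever
 * those helpers use; efx_ef10_filter_get_rx_id_limit() below implies
 * HUNT_FILTER_TBL_ROWS * 2).  This is why remove-by-ID can cheaply recover
 * filter_idx and then cross-check the stored spec's priority, while
 * remove-by-index trusts filter_id as a raw table index.
 */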

static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
				       enum efx_filter_priority priority,
				       u32 filter_id)
{
	struct efx_ef10_filter_table *table;
	int rc;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);
	rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
					     false);
	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return rc;
}

/* Caller must hold efx->filter_sem for read */
static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
					  enum efx_filter_priority priority,
					  u32 filter_id)
{
	struct efx_ef10_filter_table *table = efx->filter_state;

	if (filter_id == EFX_EF10_FILTER_ID_INVALID)
		return;

	down_write(&table->lock);
	efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
					true);
	up_write(&table->lock);
}

static int efx_ef10_filter_get_safe(struct efx_nic *efx,
				    enum efx_filter_priority priority,
				    u32 filter_id, struct efx_filter_spec *spec)
{
	unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
	const struct efx_filter_spec *saved_spec;
	struct efx_ef10_filter_table *table;
	int rc;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_read(&table->lock);
	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
	if (saved_spec && saved_spec->priority == priority &&
	    efx_ef10_filter_pri(table, saved_spec) ==
	    efx_ef10_filter_get_unsafe_pri(filter_id)) {
		*spec = *saved_spec;
		rc = 0;
	} else {
		rc = -ENOENT;
	}
	up_read(&table->lock);
	up_read(&efx->filter_sem);
	return rc;
}

static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
				    enum efx_filter_priority priority)
{
	struct efx_ef10_filter_table *table;
	unsigned int priority_mask;
	unsigned int i;
	int rc;

	priority_mask = (((1U << (priority + 1)) - 1) &
			 ~(1U << EFX_FILTER_PRI_AUTO));

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);
	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
		rc = efx_ef10_filter_remove_internal(efx, priority_mask,
						     i, true);
		if (rc && rc != -ENOENT)
			break;
		rc = 0;
	}

	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return rc;
}

static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
					 enum efx_filter_priority priority)
{
	struct efx_ef10_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_read(&table->lock);
	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		if (table->entry[filter_idx].spec &&
		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
		    priority)
			++count;
	}
	up_read(&table->lock);
	up_read(&efx->filter_sem);
	return count;
}

static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;

	return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
}

static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
				      enum efx_filter_priority priority,
				      u32 *buf, u32 size)
{
	struct efx_ef10_filter_table *table;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_read(&table->lock);

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (spec && spec->priority == priority) {
			if (count == size) {
				count = -EMSGSIZE;
				break;
			}
			buf[count++] =
				efx_ef10_make_filter_id(
					efx_ef10_filter_pri(table, spec),
					filter_idx);
		}
	}
	up_read(&table->lock);
	up_read(&efx->filter_sem);
	return count;
}

#ifdef CONFIG_RFS_ACCEL

static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
					   unsigned int filter_idx)
{
	struct efx_ef10_filter_table *table;
	struct efx_filter_spec *spec;
	bool ret;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);
	spec = efx_ef10_filter_entry_spec(table, filter_idx);

	if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
		ret = true;
		goto out_unlock;
	}

	if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
				 flow_id, filter_idx)) {
		ret = false;
		goto out_unlock;
	}

	ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
					      filter_idx, true) == 0;
out_unlock:
	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return ret;
}

#endif /* CONFIG_RFS_ACCEL */
static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
{
	int match_flags = 0;

#define MAP_FLAG(gen_flag, mcdi_field) do {				\
		u32 old_mcdi_flags = mcdi_flags;			\
		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##	\
				mcdi_field ## _LBN);			\
		if (mcdi_flags != old_mcdi_flags)			\
			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
	} while (0)

	if (encap) {
		/* encap filters must specify encap type */
		match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
		/* and imply ethertype and ip proto */
		mcdi_flags &=
			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
		mcdi_flags &=
			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
		/* VLAN tags refer to the outer packet */
		MAP_FLAG(INNER_VID, INNER_VLAN);
		MAP_FLAG(OUTER_VID, OUTER_VLAN);
		/* everything else refers to the inner packet */
		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
		MAP_FLAG(REM_HOST, IFRM_SRC_IP);
		MAP_FLAG(LOC_HOST, IFRM_DST_IP);
		MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
		MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
		MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
		MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
		MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
		MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
	} else {
		MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
		MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
		MAP_FLAG(REM_HOST, SRC_IP);
		MAP_FLAG(LOC_HOST, DST_IP);
		MAP_FLAG(REM_MAC, SRC_MAC);
		MAP_FLAG(REM_PORT, SRC_PORT);
		MAP_FLAG(LOC_MAC, DST_MAC);
		MAP_FLAG(LOC_PORT, DST_PORT);
		MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
		MAP_FLAG(INNER_VID, INNER_VLAN);
		MAP_FLAG(OUTER_VID, OUTER_VLAN);
		MAP_FLAG(IP_PROTO, IP_PROTO);
	}
#undef MAP_FLAG

	/* Did we map them all? */
	if (mcdi_flags)
		return -EINVAL;

	return match_flags;
}

static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_ef10_filter_vlan *vlan, *next_vlan;

	/* See comment in efx_ef10_filter_table_remove() */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	if (!table)
		return;

	list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
		efx_ef10_filter_del_vlan_internal(efx, vlan);
}

static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
					    bool encap,
					    enum efx_filter_match_flags match_flags)
{
	unsigned int match_pri;
	int mf;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++) {
		mf = efx_ef10_filter_match_flags_from_mcdi(encap,
				table->rx_match_mcdi_flags[match_pri]);
		if (mf == match_flags)
			return true;
	}

	return false;
}

static int
efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
				    struct efx_ef10_filter_table *table,
				    bool encap)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
	unsigned int pd_match_pri, pd_match_count;
	size_t outlen;
	int rc;

	/* Find out which RX filter types are supported, and their priorities */
	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
		       encap ?
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		return rc;

	pd_match_count = MCDI_VAR_ARRAY_LEN(
		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);

	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
		u32 mcdi_flags =
			MCDI_ARRAY_DWORD(
				outbuf,
				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
				pd_match_pri);
		rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
		if (rc < 0) {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u not supported in driver\n",
				  __func__, mcdi_flags, pd_match_pri);
		} else {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
				  __func__, mcdi_flags, pd_match_pri,
				  rc, table->rx_match_count);
			table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
			table->rx_match_count++;
		}
	}

	return 0;
}

static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct net_device *net_dev = efx->net_dev;
	struct efx_ef10_filter_table *table;
	struct efx_ef10_vlan *vlan;
	int rc;

	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return -EINVAL;

	if (efx->filter_state) /* already probed */
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	table->rx_match_count = 0;
	rc = efx_ef10_filter_table_probe_matches(efx, table, false);
	if (rc)
		goto fail;
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
		rc = efx_ef10_filter_table_probe_matches(efx, table, true);
	if (rc)
		goto fail;
	if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(efx_ef10_filter_match_supported(table, false,
		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
	      efx_ef10_filter_match_supported(table, false,
		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
		netif_info(efx, probe, net_dev,
			   "VLAN filters are not supported in this firmware variant\n");
		net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
	if (!table->entry) {
		rc = -ENOMEM;
		goto fail;
	}

	table->mc_promisc_last = false;
	table->vlan_filter =
		!!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
	INIT_LIST_HEAD(&table->vlan_list);
	init_rwsem(&table->lock);

	efx->filter_state = table;

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		if (rc)
			goto fail_add_vlan;
	}

	return 0;

fail_add_vlan:
	efx_ef10_filter_cleanup_vlans(efx);
	efx->filter_state = NULL;
fail:
	kfree(table);
	return rc;
}
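
/* Illustrative sketch of the locking protocol around probe/remove (this
 * mirrors what efx_ef10_vport_set_mac_address() below actually does):
 *
 *	down_write(&efx->filter_sem);
 *	efx_ef10_filter_table_remove(efx);
 *	... reconfigure the vport/vadaptor ...
 *	rc = efx_ef10_filter_table_probe(efx);
 *	up_write(&efx->filter_sem);
 *
 * Readers (the insert/remove/get paths) take filter_sem for read and then
 * table->lock, so holding filter_sem for write guarantees no filter
 * operation can observe a half-torn-down table.
 */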

/* Caller must hold efx->filter_sem for read if race against
 * efx_ef10_filter_table_remove() is possible
 */
static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int invalid_filters = 0, failed = 0;
	struct efx_ef10_filter_vlan *vlan;
	struct efx_filter_spec *spec;
	struct efx_rss_context *ctx;
	unsigned int filter_idx;
	u32 mcdi_flags;
	int match_pri;
	int rc, i;

	WARN_ON(!rwsem_is_locked(&efx->filter_sem));

	if (!nic_data->must_restore_filters)
		return;

	if (!table)
		return;

	down_write(&table->lock);

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
		match_pri = 0;
		while (match_pri < table->rx_match_count &&
		       table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
			++match_pri;
		if (match_pri >= table->rx_match_count) {
			invalid_filters++;
			goto not_restored;
		}
		if (spec->rss_context)
			ctx = efx_find_rss_context_entry(spec->rss_context,
							 &efx->rss_context.list);
		else
			ctx = &efx->rss_context;
		if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
			if (!ctx) {
				netif_warn(efx, drv, efx->net_dev,
					   "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
					   spec->rss_context);
				invalid_filters++;
				goto not_restored;
			}
			if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
				netif_warn(efx, drv, efx->net_dev,
					   "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
					   spec->rss_context);
				invalid_filters++;
				goto not_restored;
			}
		}

		rc = efx_ef10_filter_push(efx, spec,
					  &table->entry[filter_idx].handle,
					  ctx, false);
		if (rc)
			failed++;

		if (rc) {
not_restored:
			list_for_each_entry(vlan, &table->vlan_list, list)
				for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
					if (vlan->default_filters[i] == filter_idx)
						vlan->default_filters[i] =
							EFX_EF10_FILTER_ID_INVALID;

			kfree(spec);
			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
		}
	}

	up_write(&table->lock);

	/* This can happen validly if the MC's capabilities have changed, so
	 * is not an error.
	 */
	if (invalid_filters)
		netif_dbg(efx, drv, efx->net_dev,
			  "Did not restore %u filters that are now unsupported.\n",
			  invalid_filters);

	if (failed)
		netif_err(efx, hw, efx->net_dev,
			  "unable to restore %u filters\n", failed);
	else
		nic_data->must_restore_filters = false;
}

static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	efx_ef10_filter_cleanup_vlans(efx);
	efx->filter_state = NULL;
	/* If we were called without locking, then it's not safe to free
	 * the table as others might be using it.  So we just WARN, leak
	 * the memory, and potentially get an inconsistent filter table
	 * state.
	 * This should never actually happen.
	 */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	if (!table)
		return;

	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_ef10_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_ef10_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
					sizeof(inbuf), NULL, 0, NULL);
		if (rc)
			netif_info(efx, drv, efx->net_dev,
				   "%s: filter %04x remove failed\n",
				   __func__, filter_idx);
		kfree(spec);
	}

	vfree(table->entry);
	kfree(table);
}

static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int filter_idx;

	efx_rwsem_assert_write_locked(&table->lock);

	if (*id != EFX_EF10_FILTER_ID_INVALID) {
		filter_idx = efx_ef10_filter_get_unsafe_id(*id);
		if (!table->entry[filter_idx].spec)
			netif_dbg(efx, drv, efx->net_dev,
				  "marked null spec old %04x:%04x\n", *id,
				  filter_idx);
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
		*id = EFX_EF10_FILTER_ID_INVALID;
	}
}

/* Mark old per-VLAN filters that may need to be removed */
static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
					   struct efx_ef10_filter_vlan *vlan)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	unsigned int i;

	for (i = 0; i < table->dev_uc_count; i++)
		efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
	for (i = 0; i < table->dev_mc_count; i++)
		efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
		efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
}

/* Mark old filters that may need to be removed.
 * Caller must hold efx->filter_sem for read if race against
 * efx_ef10_filter_table_remove() is possible
 */
static void efx_ef10_filter_mark_old(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct efx_ef10_filter_vlan *vlan;

	down_write(&table->lock);
	list_for_each_entry(vlan, &table->vlan_list, list)
		_efx_ef10_filter_vlan_mark_old(efx, vlan);
	up_write(&table->lock);
}
Ben Hutchings8127d662013-08-29 19:19:29 +01005170
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005171static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005172{
5173 struct efx_ef10_filter_table *table = efx->filter_state;
5174 struct net_device *net_dev = efx->net_dev;
5175 struct netdev_hw_addr *uc;
5176 unsigned int i;
5177
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005178 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005179 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
5180 i = 1;
5181 netdev_for_each_uc_addr(uc, net_dev) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005182 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005183 table->uc_promisc = true;
Edward Cree12fb0da2015-07-21 15:11:00 +01005184 break;
5185 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005186 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
5187 i++;
5188 }
Bert Kenwardc70d6812017-07-12 17:19:41 +01005189
5190 table->dev_uc_count = i;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005191}

static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
{
	struct efx_ef10_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *mc;
	unsigned int i;

	table->mc_overflow = false;
	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));

	i = 0;
	netdev_for_each_mc_addr(mc, net_dev) {
		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
			table->mc_promisc = true;
			table->mc_overflow = true;
			break;
		}
		ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
		i++;
	}

	table->dev_mc_count = i;
}
Ben Hutchings8127d662013-08-29 19:19:29 +01005216
Edward Cree12fb0da2015-07-21 15:11:00 +01005217static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005218 struct efx_ef10_filter_vlan *vlan,
5219 bool multicast, bool rollback)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005220{
5221 struct efx_ef10_filter_table *table = efx->filter_state;
5222 struct efx_ef10_dev_addr *addr_list;
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005223 enum efx_filter_flags filter_flags;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005224 struct efx_filter_spec spec;
Edward Cree12fb0da2015-07-21 15:11:00 +01005225 u8 baddr[ETH_ALEN];
5226 unsigned int i, j;
5227 int addr_count;
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005228 u16 *ids;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005229 int rc;
5230
5231 if (multicast) {
5232 addr_list = table->dev_mc_list;
Edward Cree12fb0da2015-07-21 15:11:00 +01005233 addr_count = table->dev_mc_count;
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005234 ids = vlan->mc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005235 } else {
5236 addr_list = table->dev_uc_list;
Edward Cree12fb0da2015-07-21 15:11:00 +01005237 addr_count = table->dev_uc_count;
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005238 ids = vlan->uc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005239 }
5240
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005241 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
5242
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005243 /* Insert/renew filters */
Edward Cree12fb0da2015-07-21 15:11:00 +01005244 for (i = 0; i < addr_count; i++) {
Edward Creed58299a2017-06-29 16:50:06 +01005245 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005246 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005247 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
Jon Cooperb6f568e2015-07-21 15:10:15 +01005248 rc = efx_ef10_filter_insert(efx, &spec, true);
5249 if (rc < 0) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005250 if (rollback) {
5251 netif_info(efx, drv, efx->net_dev,
5252 "efx_ef10_filter_insert failed rc=%d\n",
5253 rc);
5254 /* Fall back to promiscuous */
5255 for (j = 0; j < i; j++) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005256 efx_ef10_filter_remove_unsafe(
5257 efx, EFX_FILTER_PRI_AUTO,
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005258 ids[j]);
5259 ids[j] = EFX_EF10_FILTER_ID_INVALID;
Edward Cree12fb0da2015-07-21 15:11:00 +01005260 }
5261 return rc;
5262 } else {
Edward Creed58299a2017-06-29 16:50:06 +01005263 /* keep invalid ID, and carry on */
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005264 }
Edward Creed58299a2017-06-29 16:50:06 +01005265 } else {
5266 ids[i] = efx_ef10_filter_get_unsafe_id(rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01005267 }
5268 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005269
Edward Cree12fb0da2015-07-21 15:11:00 +01005270 if (multicast && rollback) {
5271 /* Also need an Ethernet broadcast filter */
Edward Cree9b410802017-01-27 15:02:52 +00005272 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
5273 EFX_EF10_FILTER_ID_INVALID);
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005274 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Edward Cree12fb0da2015-07-21 15:11:00 +01005275 eth_broadcast_addr(baddr);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005276 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005277 rc = efx_ef10_filter_insert(efx, &spec, true);
Edward Cree12fb0da2015-07-21 15:11:00 +01005278 if (rc < 0) {
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005279 netif_warn(efx, drv, efx->net_dev,
Edward Cree12fb0da2015-07-21 15:11:00 +01005280 "Broadcast filter insert failed rc=%d\n", rc);
5281 /* Fall back to promiscuous */
5282 for (j = 0; j < i; j++) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005283 efx_ef10_filter_remove_unsafe(
5284 efx, EFX_FILTER_PRI_AUTO,
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005285 ids[j]);
5286 ids[j] = EFX_EF10_FILTER_ID_INVALID;
Edward Cree12fb0da2015-07-21 15:11:00 +01005287 }
5288 return rc;
5289 } else {
Edward Cree9b410802017-01-27 15:02:52 +00005290 vlan->default_filters[EFX_EF10_BCAST] =
Jon Cooper0ccb9982017-02-17 15:49:13 +00005291 efx_ef10_filter_get_unsafe_id(rc);
Edward Cree12fb0da2015-07-21 15:11:00 +01005292 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005293 }
Edward Cree12fb0da2015-07-21 15:11:00 +01005294
5295 return 0;
5296}

static int efx_ef10_filter_insert_def(struct efx_nic *efx,
				      struct efx_ef10_filter_vlan *vlan,
				      enum efx_encap_type encap_type,
				      bool multicast, bool rollback)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	enum efx_filter_flags filter_flags;
	struct efx_filter_spec spec;
	u8 baddr[ETH_ALEN];
	int rc;
	u16 *id;

	filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);

	if (multicast)
		efx_filter_set_mc_def(&spec);
	else
		efx_filter_set_uc_def(&spec);

	if (encap_type) {
		if (nic_data->datapath_caps &
		    (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
			efx_filter_set_encap_type(&spec, encap_type);
		else
			/* don't insert encap filters on non-supporting
			 * platforms. ID will be left as INVALID.
			 */
			return 0;
	}

	if (vlan->vid != EFX_FILTER_VID_UNSPEC)
		efx_filter_set_eth_local(&spec, vlan->vid, NULL);

	rc = efx_ef10_filter_insert(efx, &spec, true);
	if (rc < 0) {
		const char *um = multicast ? "Multicast" : "Unicast";
		const char *encap_name = "";
		const char *encap_ipv = "";

		if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
		    EFX_ENCAP_TYPE_VXLAN)
			encap_name = "VXLAN ";
		else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
			 EFX_ENCAP_TYPE_NVGRE)
			encap_name = "NVGRE ";
		else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
			 EFX_ENCAP_TYPE_GENEVE)
			encap_name = "GENEVE ";
		if (encap_type & EFX_ENCAP_FLAG_IPV6)
			encap_ipv = "IPv6 ";
		else if (encap_type)
			encap_ipv = "IPv4 ";

		/* unprivileged functions can't insert mismatch filters
		 * for encapsulated or unicast traffic, so downgrade
		 * those warnings to debug.
		 */
		netif_cond_dbg(efx, drv, efx->net_dev,
			       rc == -EPERM && (encap_type || !multicast), warn,
			       "%s%s%s mismatch filter insert failed rc=%d\n",
			       encap_name, encap_ipv, um, rc);
	} else if (multicast) {
		/* mapping from encap types to default filter IDs (multicast) */
		static enum efx_ef10_default_filters map[] = {
			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_VXLAN6_MCDEF,
			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_NVGRE6_MCDEF,
			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_GENEVE6_MCDEF,
		};

		/* quick bounds check (BCAST result impossible) */
		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
			WARN_ON(1);
			return -EINVAL;
		}
		/* then follow map */
		id = &vlan->default_filters[map[encap_type]];

		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
		*id = efx_ef10_filter_get_unsafe_id(rc);
		if (!nic_data->workaround_26807 && !encap_type) {
			/* Also need an Ethernet broadcast filter */
			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
					   filter_flags, 0);
			eth_broadcast_addr(baddr);
			efx_filter_set_eth_local(&spec, vlan->vid, baddr);
			rc = efx_ef10_filter_insert(efx, &spec, true);
			if (rc < 0) {
				netif_warn(efx, drv, efx->net_dev,
					   "Broadcast filter insert failed rc=%d\n",
					   rc);
				if (rollback) {
					/* Roll back the mc_def filter */
					efx_ef10_filter_remove_unsafe(
						efx, EFX_FILTER_PRI_AUTO,
						*id);
					*id = EFX_EF10_FILTER_ID_INVALID;
					return rc;
				}
			} else {
				EFX_WARN_ON_PARANOID(
					vlan->default_filters[EFX_EF10_BCAST] !=
					EFX_EF10_FILTER_ID_INVALID);
				vlan->default_filters[EFX_EF10_BCAST] =
					efx_ef10_filter_get_unsafe_id(rc);
			}
		}
		rc = 0;
	} else {
		/* mapping from encap types to default filter IDs (unicast) */
		static enum efx_ef10_default_filters map[] = {
			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_VXLAN6_UCDEF,
			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_NVGRE6_UCDEF,
			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_GENEVE6_UCDEF,
		};

		/* quick bounds check (BCAST result impossible) */
		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
			WARN_ON(1);
			return -EINVAL;
		}
		/* then follow map */
		id = &vlan->default_filters[map[encap_type]];
		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
		*id = rc;
		rc = 0;
	}
	return rc;
}
5444
Edward Creec2bebe32018-03-27 17:42:28 +01005445/* Remove filters that weren't renewed. */
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005446static void efx_ef10_filter_remove_old(struct efx_nic *efx)
5447{
5448 struct efx_ef10_filter_table *table = efx->filter_state;
Bert Kenwarde65a5102015-12-23 08:57:36 +00005449 int remove_failed = 0;
5450 int remove_noent = 0;
5451 int rc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005452 int i;
5453
Edward Creec2bebe32018-03-27 17:42:28 +01005454 down_write(&table->lock);
Ben Hutchings8127d662013-08-29 19:19:29 +01005455 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
Mark Rutland6aa7de02017-10-23 14:07:29 -07005456 if (READ_ONCE(table->entry[i].spec) &
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00005457 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
Bert Kenwarde65a5102015-12-23 08:57:36 +00005458 rc = efx_ef10_filter_remove_internal(efx,
5459 1U << EFX_FILTER_PRI_AUTO, i, true);
5460 if (rc == -ENOENT)
5461 remove_noent++;
5462 else if (rc)
5463 remove_failed++;
Ben Hutchings8127d662013-08-29 19:19:29 +01005464 }
5465 }
Edward Creec2bebe32018-03-27 17:42:28 +01005466 up_write(&table->lock);
Bert Kenwarde65a5102015-12-23 08:57:36 +00005467
5468 if (remove_failed)
5469 netif_info(efx, drv, efx->net_dev,
5470 "%s: failed to remove %d filters\n",
5471 __func__, remove_failed);
5472 if (remove_noent)
5473 netif_info(efx, drv, efx->net_dev,
5474 "%s: failed to remove %d non-existent filters\n",
5475 __func__, remove_noent);
Ben Hutchings8127d662013-08-29 19:19:29 +01005476}
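
/*
 * Editorial sketch (not part of the driver): efx_ef10_filter_remove_old()
 * is the sweep half of a mark-and-sweep scheme.  efx_ef10_filter_mark_old()
 * tags every automatic filter with EFX_EF10_FILTER_FLAG_AUTO_OLD, and
 * re-inserting a filter clears the tag, so only stale entries stay marked.
 * The calling pattern (mirroring efx_ef10_filter_sync_rx_mode() later in
 * this file; this helper name is hypothetical):
 */
static void example_filter_resync(struct efx_nic *efx)
{
	efx_ef10_filter_mark_old(efx);		/* mark all AUTO filters */
	/* ... re-insert every filter still wanted; renewal unmarks it ... */
	efx_ef10_filter_remove_old(efx);	/* sweep whatever stayed marked */
}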
5477
Daniel Pieczko7a186f42015-07-07 11:37:19 +01005478static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
5479{
5480 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5481 u8 mac_old[ETH_ALEN];
5482 int rc, rc2;
5483
5484 /* Only reconfigure a PF-created vport */
5485 if (is_zero_ether_addr(nic_data->vport_mac))
5486 return 0;
5487
5488 efx_device_detach_sync(efx);
5489 efx_net_stop(efx->net_dev);
5490 down_write(&efx->filter_sem);
5491 efx_ef10_filter_table_remove(efx);
5492 up_write(&efx->filter_sem);
5493
5494 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
5495 if (rc)
5496 goto restore_filters;
5497
5498 ether_addr_copy(mac_old, nic_data->vport_mac);
5499 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
5500 nic_data->vport_mac);
5501 if (rc)
5502 goto restore_vadaptor;
5503
5504 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
5505 efx->net_dev->dev_addr);
5506 if (!rc) {
5507 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
5508 } else {
5509 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
5510 if (rc2) {
5511 /* Failed to add original MAC, so clear vport_mac */
5512 eth_zero_addr(nic_data->vport_mac);
5513 goto reset_nic;
5514 }
5515 }
5516
5517restore_vadaptor:
5518 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
5519 if (rc2)
5520 goto reset_nic;
5521restore_filters:
5522 down_write(&efx->filter_sem);
5523 rc2 = efx_ef10_filter_table_probe(efx);
5524 up_write(&efx->filter_sem);
5525 if (rc2)
5526 goto reset_nic;
5527
5528 rc2 = efx_net_open(efx->net_dev);
5529 if (rc2)
5530 goto reset_nic;
5531
Peter Dunning9c568fd2017-02-17 15:50:43 +00005532 efx_device_attach_if_not_resetting(efx);
Daniel Pieczko7a186f42015-07-07 11:37:19 +01005533
5534 return rc;
5535
5536reset_nic:
5537 netif_err(efx, drv, efx->net_dev,
5538 "Failed to restore when changing MAC address - scheduling reset\n");
5539 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
5540
5541 return rc ? rc : rc2;
5542}
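
/*
 * Editorial sketch (not part of the driver): the function above follows
 * the usual goto-unwind shape for multi-step reconfiguration - each
 * failure jumps to a label that restores only what was already torn down,
 * and a failed restore escalates to a datapath reset.  Reduced to two
 * steps with hypothetical helpers:
 */
static int example_two_step_reconfig(struct efx_nic *efx)
{
	int rc, rc2;

	rc = example_step_one(efx);		/* hypothetical */
	if (rc)
		return rc;
	rc = example_step_two(efx);		/* hypothetical */
	if (rc)
		goto undo_one;
	return 0;

undo_one:
	rc2 = example_undo_step_one(efx);	/* hypothetical */
	if (rc2)
		efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
	return rc;
}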
5543
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005544/* Caller must hold efx->filter_sem for read if race against
5545 * efx_ef10_filter_table_remove() is possible
5546 */
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005547static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
5548 struct efx_ef10_filter_vlan *vlan)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005549{
5550 struct efx_ef10_filter_table *table = efx->filter_state;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005551 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005552
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01005553 /* Do not install unspecified VID if VLAN filtering is enabled.
5554 * Do not install all specified VIDs if VLAN filtering is disabled.
5555 */
5556 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
5557 return;
5558
Edward Cree12fb0da2015-07-21 15:11:00 +01005559 /* Insert/renew unicast filters */
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005560 if (table->uc_promisc) {
Edward Cree9b410802017-01-27 15:02:52 +00005561 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
5562 false, false);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005563 efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005564 } else {
5565 /* If any of the filters failed to insert, fall back to
5566 * promiscuous mode - add in the uc_def filter. But keep
5567 * our individual unicast filters.
5568 */
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005569 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
Edward Cree9b410802017-01-27 15:02:52 +00005570 efx_ef10_filter_insert_def(efx, vlan,
5571 EFX_ENCAP_TYPE_NONE,
5572 false, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005573 }
Edward Cree9b410802017-01-27 15:02:52 +00005574 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5575 false, false);
5576 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5577 EFX_ENCAP_FLAG_IPV6,
5578 false, false);
5579 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5580 false, false);
5581 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5582 EFX_ENCAP_FLAG_IPV6,
5583 false, false);
5584 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5585 false, false);
5586 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5587 EFX_ENCAP_FLAG_IPV6,
5588 false, false);
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005589
Edward Cree12fb0da2015-07-21 15:11:00 +01005590 /* Insert/renew multicast filters */
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005591 /* If changing promiscuous state with cascaded multicast filters, remove
5592 * old filters first, so that packets are dropped rather than duplicated
5593 */
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005594 if (nic_data->workaround_26807 &&
5595 table->mc_promisc_last != table->mc_promisc)
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005596 efx_ef10_filter_remove_old(efx);
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005597 if (table->mc_promisc) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005598 if (nic_data->workaround_26807) {
5599 /* If we failed to insert promiscuous filters, roll back
5600 * and fall back to individual multicast filters
5601 */
Edward Cree9b410802017-01-27 15:02:52 +00005602 if (efx_ef10_filter_insert_def(efx, vlan,
5603 EFX_ENCAP_TYPE_NONE,
5604 true, true)) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005605 /* Changing promisc state, so remove old filters */
5606 efx_ef10_filter_remove_old(efx);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005607 efx_ef10_filter_insert_addr_list(efx, vlan,
5608 true, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005609 }
5610 } else {
5611 /* If we failed to insert promiscuous filters, don't
Edward Cree148cbab2017-04-04 17:02:49 +01005612 * roll back. Regardless, also insert the mc_list,
5613 * unless it's incomplete due to overflow
Edward Cree12fb0da2015-07-21 15:11:00 +01005614 */
Edward Cree9b410802017-01-27 15:02:52 +00005615 efx_ef10_filter_insert_def(efx, vlan,
5616 EFX_ENCAP_TYPE_NONE,
5617 true, false);
Edward Cree148cbab2017-04-04 17:02:49 +01005618 if (!table->mc_overflow)
5619 efx_ef10_filter_insert_addr_list(efx, vlan,
5620 true, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005621 }
5622 } else {
5623 /* If any filters failed to insert, roll back and fall back to
5624 * promiscuous mode - mc_def filter and maybe broadcast. If
5625 * that fails, roll back again and insert as many of our
5626 * individual multicast filters as we can.
5627 */
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005628 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005629 /* Changing promisc state, so remove old filters */
5630 if (nic_data->workaround_26807)
5631 efx_ef10_filter_remove_old(efx);
Edward Cree9b410802017-01-27 15:02:52 +00005632 if (efx_ef10_filter_insert_def(efx, vlan,
5633 EFX_ENCAP_TYPE_NONE,
5634 true, true))
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005635 efx_ef10_filter_insert_addr_list(efx, vlan,
5636 true, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005637 }
5638 }
Edward Cree9b410802017-01-27 15:02:52 +00005639 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5640 true, false);
5641 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5642 EFX_ENCAP_FLAG_IPV6,
5643 true, false);
5644 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5645 true, false);
5646 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5647 EFX_ENCAP_FLAG_IPV6,
5648 true, false);
5649 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5650 true, false);
5651 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5652 EFX_ENCAP_FLAG_IPV6,
5653 true, false);
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005654}
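
/*
 * Editorial sketch (not part of the driver): for unicast, the sync above
 * reduces to "individual filters first, matching default filter when
 * promiscuous or on failure".  Condensed, with the encap variants elided
 * (note the real code inserts the default filter before the address list
 * in the promiscuous case):
 */
static void example_vlan_sync_uc(struct efx_nic *efx,
				 struct efx_ef10_filter_vlan *vlan)
{
	struct efx_ef10_filter_table *table = efx->filter_state;

	if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false) ||
	    table->uc_promisc)
		efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
					   false, false);
}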
5655
5656/* Caller must hold efx->filter_sem for read if race against
5657 * efx_ef10_filter_table_remove() is possible
5658 */
5659static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
5660{
5661 struct efx_ef10_filter_table *table = efx->filter_state;
5662 struct net_device *net_dev = efx->net_dev;
5663 struct efx_ef10_filter_vlan *vlan;
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01005664 bool vlan_filter;
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005665
5666 if (!efx_dev_registered(efx))
5667 return;
5668
5669 if (!table)
5670 return;
5671
5672 efx_ef10_filter_mark_old(efx);
5673
5674 /* Copy/convert the address lists; add the primary station
5675 * address and broadcast address
5676 */
5677 netif_addr_lock_bh(net_dev);
5678 efx_ef10_filter_uc_addr_list(efx);
5679 efx_ef10_filter_mc_addr_list(efx);
5680 netif_addr_unlock_bh(net_dev);
5681
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01005682 /* If VLAN filtering changes, all old filters are finally removed.
5683 * Do it in advance to avoid conflicts for unicast untagged and
5684 * VLAN 0 tagged filters.
5685 */
5686 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5687 if (table->vlan_filter != vlan_filter) {
5688 table->vlan_filter = vlan_filter;
5689 efx_ef10_filter_remove_old(efx);
5690 }
5691
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005692 list_for_each_entry(vlan, &table->vlan_list, list)
5693 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005694
5695 efx_ef10_filter_remove_old(efx);
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005696 table->mc_promisc_last = table->mc_promisc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005697}
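
/*
 * Editorial sketch (not part of the driver): this function backs the
 * reconfigure_mac hooks further down (efx_ef10_mac_reconfigure{,_vf}).
 * A caller honouring the locking comment above would look roughly like
 * this (hypothetical helper name):
 */
static int example_push_rx_mode(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);	/* vs. efx_ef10_filter_table_remove() */
	efx_ef10_filter_sync_rx_mode(efx);
	up_read(&efx->filter_sem);

	return efx_mcdi_set_mac(efx);	/* as efx_ef10_mac_reconfigure() does */
}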
5698
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005699static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
5700{
5701 struct efx_ef10_filter_table *table = efx->filter_state;
5702 struct efx_ef10_filter_vlan *vlan;
5703
5704 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5705
5706 list_for_each_entry(vlan, &table->vlan_list, list) {
5707 if (vlan->vid == vid)
5708 return vlan;
5709 }
5710
5711 return NULL;
5712}
5713
5714static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
5715{
5716 struct efx_ef10_filter_table *table = efx->filter_state;
5717 struct efx_ef10_filter_vlan *vlan;
5718 unsigned int i;
5719
5720 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5721 return -EINVAL;
5722
5723 vlan = efx_ef10_filter_find_vlan(efx, vid);
5724 if (WARN_ON(vlan)) {
5725 netif_err(efx, drv, efx->net_dev,
5726 "VLAN %u already added\n", vid);
5727 return -EALREADY;
5728 }
5729
5730 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
5731 if (!vlan)
5732 return -ENOMEM;
5733
5734 vlan->vid = vid;
5735
5736 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5737 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
5738 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5739 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
Edward Cree9b410802017-01-27 15:02:52 +00005740 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5741 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005742
5743 list_add_tail(&vlan->list, &table->vlan_list);
5744
5745 if (efx_dev_registered(efx))
5746 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5747
5748 return 0;
5749}
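
/*
 * Editorial sketch (not part of the driver): efx_ef10_filter_add_vlan()
 * asserts that efx->filter_sem is held for writing, so the expected
 * caller shape - presumably what efx_ef10_add_vlan(), reached via the
 * vlan_rx_add_vid hook near the end of this file, provides - is:
 */
static int example_add_vid(struct efx_nic *efx, u16 vid)
{
	int rc;

	down_write(&efx->filter_sem);
	rc = efx_ef10_filter_add_vlan(efx, vid);
	up_write(&efx->filter_sem);
	return rc;
}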
5750
5751static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
5752 struct efx_ef10_filter_vlan *vlan)
5753{
5754 unsigned int i;
5755
5756 /* See comment in efx_ef10_filter_table_remove() */
5757 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5758 return;
5759
5760 list_del(&vlan->list);
5761
Edward Cree8c915622016-06-15 17:49:05 +01005762 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005763 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
Edward Cree8c915622016-06-15 17:49:05 +01005764 vlan->uc[i]);
5765 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005766 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
Edward Cree8c915622016-06-15 17:49:05 +01005767 vlan->mc[i]);
Edward Cree9b410802017-01-27 15:02:52 +00005768 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5769 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
5770 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5771 vlan->default_filters[i]);
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005772
5773 kfree(vlan);
5774}
5775
5776static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
5777{
5778 struct efx_ef10_filter_vlan *vlan;
5779
5780 /* See comment in efx_ef10_filter_table_remove() */
5781 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5782 return;
5783
5784 vlan = efx_ef10_filter_find_vlan(efx, vid);
5785 if (!vlan) {
5786 netif_err(efx, drv, efx->net_dev,
5787 "VLAN %u not found in filter state\n", vid);
5788 return;
5789 }
5790
5791 efx_ef10_filter_del_vlan_internal(efx, vlan);
5792}
5793
Shradha Shah910c8782015-05-20 11:12:48 +01005794static int efx_ef10_set_mac_address(struct efx_nic *efx)
5795{
5796 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
5797 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5798 bool was_enabled = efx->port_enabled;
5799 int rc;
5800
5801 efx_device_detach_sync(efx);
5802 efx_net_stop(efx->net_dev);
Martin Habetsd2489532016-06-15 17:48:49 +01005803
5804 mutex_lock(&efx->mac_lock);
Shradha Shah910c8782015-05-20 11:12:48 +01005805 down_write(&efx->filter_sem);
5806 efx_ef10_filter_table_remove(efx);
5807
5808 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
5809 efx->net_dev->dev_addr);
5810 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
5811 nic_data->vport_id);
Daniel Pieczko535a6172015-07-07 11:37:33 +01005812 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
5813 sizeof(inbuf), NULL, 0, NULL);
Shradha Shah910c8782015-05-20 11:12:48 +01005814
5815 efx_ef10_filter_table_probe(efx);
5816 up_write(&efx->filter_sem);
Martin Habetsd2489532016-06-15 17:48:49 +01005817 mutex_unlock(&efx->mac_lock);
5818
Shradha Shah910c8782015-05-20 11:12:48 +01005819 if (was_enabled)
5820 efx_net_open(efx->net_dev);
Peter Dunning9c568fd2017-02-17 15:50:43 +00005821 efx_device_attach_if_not_resetting(efx);
Shradha Shah910c8782015-05-20 11:12:48 +01005822
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005823#ifdef CONFIG_SFC_SRIOV
5824 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
Shradha Shah910c8782015-05-20 11:12:48 +01005825 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
5826
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005827 if (rc == -EPERM) {
5828 struct efx_nic *efx_pf;
Shradha Shah910c8782015-05-20 11:12:48 +01005829
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005830 /* Switch to PF and change MAC address on vport */
5831 efx_pf = pci_get_drvdata(pci_dev_pf);
5832
5833 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
Shradha Shah910c8782015-05-20 11:12:48 +01005834 nic_data->vf_index,
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005835 efx->net_dev->dev_addr);
5836 } else if (!rc) {
Shradha Shah910c8782015-05-20 11:12:48 +01005837 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
5838 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
5839 unsigned int i;
5840
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005841 /* MAC address successfully changed by VF (with MAC
5842 * spoofing) so update the parent PF if possible.
5843 */
Shradha Shah910c8782015-05-20 11:12:48 +01005844 for (i = 0; i < efx_pf->vf_count; ++i) {
5845 struct ef10_vf *vf = nic_data->vf + i;
5846
5847 if (vf->efx == efx) {
5848 ether_addr_copy(vf->mac,
5849 efx->net_dev->dev_addr);
5850 return 0;
5851 }
5852 }
5853 }
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005854 } else
Shradha Shah910c8782015-05-20 11:12:48 +01005855#endif
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005856 if (rc == -EPERM) {
5857 netif_err(efx, drv, efx->net_dev,
5858 "Cannot change MAC address; use sfboot to enable"
5859 " mac-spoofing on this interface\n");
Daniel Pieczko7a186f42015-07-07 11:37:19 +01005860 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
5861 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC,
5862 * fall back to the method of changing the MAC address on the
5863 * vport. This only applies to PFs because such versions of
5864 * MCFW do not support VFs.
5865 */
5866 rc = efx_ef10_vport_set_mac_address(efx);
Robert Stonehousecbad52e2017-11-07 17:30:30 +00005867 } else if (rc) {
Daniel Pieczko535a6172015-07-07 11:37:33 +01005868 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
5869 sizeof(inbuf), NULL, 0, rc);
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005870 }
5871
Shradha Shah910c8782015-05-20 11:12:48 +01005872 return rc;
5873}
5874
Ben Hutchings8127d662013-08-29 19:19:29 +01005875static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
5876{
5877 efx_ef10_filter_sync_rx_mode(efx);
5878
5879 return efx_mcdi_set_mac(efx);
5880}
5881
Shradha Shah862f8942015-05-20 11:08:56 +01005882static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
5883{
5884 efx_ef10_filter_sync_rx_mode(efx);
5885
5886 return 0;
5887}
5888
Jon Cooper74cd60a2013-09-16 14:18:51 +01005889static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
5890{
5891 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
5892
5893 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
5894 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
5895 NULL, 0, NULL);
5896}
5897
5898/* MC BISTs follow a different poll mechanism to phy BISTs.
5899 * The BIST is done in the poll handler on the MC, and the MCDI command
5900 * will block until the BIST is done.
5901 */
5902static int efx_ef10_poll_bist(struct efx_nic *efx)
5903{
5904 int rc;
5905 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
5906 size_t outlen;
5907 u32 result;
5908
5909 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
5910 outbuf, sizeof(outbuf), &outlen);
5911 if (rc != 0)
5912 return rc;
5913
5914 if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
5915 return -EIO;
5916
5917 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
5918 switch (result) {
5919 case MC_CMD_POLL_BIST_PASSED:
5920 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
5921 return 0;
5922 case MC_CMD_POLL_BIST_TIMEOUT:
5923 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
5924 return -EIO;
5925 case MC_CMD_POLL_BIST_FAILED:
5926 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
5927 return -EIO;
5928 default:
5929 netif_err(efx, hw, efx->net_dev,
5930 "BIST returned unknown result %u", result);
5931 return -EIO;
5932 }
5933}
5934
5935static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
5936{
5937 int rc;
5938
5939 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
5940
5941 rc = efx_ef10_start_bist(efx, bist_type);
5942 if (rc != 0)
5943 return rc;
5944
5945 return efx_ef10_poll_bist(efx);
5946}
5947
5948static int
5949efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
5950{
5951 int rc, rc2;
5952
5953 efx_reset_down(efx, RESET_TYPE_WORLD);
5954
5955 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
5956 NULL, 0, NULL, 0, NULL);
5957 if (rc != 0)
5958 goto out;
5959
5960 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
5961 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
5962
5963 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
5964
5965out:
Daniel Pieczko27324822015-07-31 11:14:54 +01005966 if (rc == -EPERM)
5967 rc = 0;
Jon Cooper74cd60a2013-09-16 14:18:51 +01005968 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
5969 return rc ? rc : rc2;
5970}
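
/*
 * Editorial note (not part of the driver): the efx_self_tests fields set
 * above use the ethtool self-test convention of 1 for pass and -1 for
 * fail, so checking the outcome is simply (hypothetical helper):
 */
static bool example_chip_bists_passed(const struct efx_self_tests *tests)
{
	return tests->memory == 1 && tests->registers == 1;
}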
5971
Ben Hutchings8127d662013-08-29 19:19:29 +01005972#ifdef CONFIG_SFC_MTD
5973
5974struct efx_ef10_nvram_type_info {
5975 u16 type, type_mask;
5976 u8 port;
5977 const char *name;
5978};
5979
5980static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
5981 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
5982 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
5983 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
5984 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
5985 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
5986 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
5987 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
5988 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
5989 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
Ben Hutchingsa84f3bf92013-10-09 14:14:41 +01005990 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
Ben Hutchings8127d662013-08-29 19:19:29 +01005991 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
5992};
5993
5994static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
5995 struct efx_mcdi_mtd_partition *part,
5996 unsigned int type)
5997{
5998 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
5999 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
6000 const struct efx_ef10_nvram_type_info *info;
6001 size_t size, erase_size, outlen;
6002 bool protected;
6003 int rc;
6004
6005 for (info = efx_ef10_nvram_types; ; info++) {
6006 if (info ==
6007 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
6008 return -ENODEV;
6009 if ((type & ~info->type_mask) == info->type)
6010 break;
6011 }
6012 if (info->port != efx_port_num(efx))
6013 return -ENODEV;
6014
6015 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
6016 if (rc)
6017 return rc;
6018 if (protected)
6019 return -ENODEV; /* hide it */
6020
6021 part->nvram_type = type;
6022
6023 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
6024 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
6025 outbuf, sizeof(outbuf), &outlen);
6026 if (rc)
6027 return rc;
6028 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
6029 return -EIO;
6030 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
6031 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
6032 part->fw_subtype = MCDI_DWORD(outbuf,
6033 NVRAM_METADATA_OUT_SUBTYPE);
6034
6035 part->common.dev_type_name = "EF10 NVRAM manager";
6036 part->common.type_name = info->name;
6037
6038 part->common.mtd.type = MTD_NORFLASH;
6039 part->common.mtd.flags = MTD_CAP_NORFLASH;
6040 part->common.mtd.size = size;
6041 part->common.mtd.erasesize = erase_size;
6042
6043 return 0;
6044}
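
/*
 * Editorial note (not part of the driver): the type/type_mask pairs in
 * efx_ef10_nvram_types implement a masked match, so one entry such as
 * NVRAM_PARTITION_TYPE_PHY_MIN with mask 0xff covers a whole range of PHY
 * partition types.  The test in the lookup loop above is just:
 */
static bool example_nvram_type_matches(
	const struct efx_ef10_nvram_type_info *info, unsigned int type)
{
	return (type & ~info->type_mask) == info->type;
}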
6045
6046static int efx_ef10_mtd_probe(struct efx_nic *efx)
6047{
6048 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6049 struct efx_mcdi_mtd_partition *parts;
6050 size_t outlen, n_parts_total, i, n_parts;
6051 unsigned int type;
6052 int rc;
6053
6054 ASSERT_RTNL();
6055
6056 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
6057 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
6058 outbuf, sizeof(outbuf), &outlen);
6059 if (rc)
6060 return rc;
6061 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
6062 return -EIO;
6063
6064 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
6065 if (n_parts_total >
6066 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
6067 return -EIO;
6068
6069 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
6070 if (!parts)
6071 return -ENOMEM;
6072
6073 n_parts = 0;
6074 for (i = 0; i < n_parts_total; i++) {
6075 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
6076 i);
6077 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
6078 if (rc == 0)
6079 n_parts++;
6080 else if (rc != -ENODEV)
6081 goto fail;
6082 }
6083
6084 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
6085fail:
6086 if (rc)
6087 kfree(parts);
6088 return rc;
6089}
6090
6091#endif /* CONFIG_SFC_MTD */
6092
6093static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
6094{
6095 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
6096}
6097
Shradha Shah02246a72015-05-06 00:58:14 +01006098static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
6099 u32 host_time) {}
6100
Jon Cooperbd9a2652013-11-18 12:54:41 +00006101static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
6102 bool temp)
6103{
6104 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
6105 int rc;
6106
6107 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
6108 channel->sync_events_state == SYNC_EVENTS_VALID ||
6109 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
6110 return 0;
6111 channel->sync_events_state = SYNC_EVENTS_REQUESTED;
6112
6113 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
6114 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6115 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
6116 channel->channel);
6117
6118 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6119 inbuf, sizeof(inbuf), NULL, 0, NULL);
6120
6121 if (rc != 0)
6122 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6123 SYNC_EVENTS_DISABLED;
6124
6125 return rc;
6126}
6127
6128static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
6129 bool temp)
6130{
6131 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
6132 int rc;
6133
6134 if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
6135 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
6136 return 0;
6137 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
6138 channel->sync_events_state = SYNC_EVENTS_DISABLED;
6139 return 0;
6140 }
6141 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6142 SYNC_EVENTS_DISABLED;
6143
6144 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
6145 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6146 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
6147 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
6148 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
6149 channel->channel);
6150
6151 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6152 inbuf, sizeof(inbuf), NULL, 0, NULL);
6153
6154 return rc;
6155}
6156
6157static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
6158 bool temp)
6159{
6160 int (*set)(struct efx_channel *channel, bool temp);
6161 struct efx_channel *channel;
6162
6163 set = en ?
6164 efx_ef10_rx_enable_timestamping :
6165 efx_ef10_rx_disable_timestamping;
6166
Edward Cree2935e3c2018-01-25 17:26:06 +00006167 channel = efx_ptp_channel(efx);
6168 if (channel) {
Jon Cooperbd9a2652013-11-18 12:54:41 +00006169 int rc = set(channel, temp);
6170 if (en && rc != 0) {
6171 efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
6172 return rc;
6173 }
6174 }
6175
6176 return 0;
6177}
6178
Shradha Shah02246a72015-05-06 00:58:14 +01006179static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
6180 struct hwtstamp_config *init)
6181{
6182 return -EOPNOTSUPP;
6183}
6184
Jon Cooperbd9a2652013-11-18 12:54:41 +00006185static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
6186 struct hwtstamp_config *init)
6187{
6188 int rc;
6189
6190 switch (init->rx_filter) {
6191 case HWTSTAMP_FILTER_NONE:
6192 efx_ef10_ptp_set_ts_sync_events(efx, false, false);
6193 /* if TX timestamping is still requested then leave PTP on */
6194 return efx_ptp_change_mode(efx,
6195 init->tx_type != HWTSTAMP_TX_OFF, 0);
6196 case HWTSTAMP_FILTER_ALL:
6197 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6198 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6199 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6200 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6201 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6202 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6203 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6204 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6205 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6206 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6207 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6208 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Miroslav Lichvare3412572017-05-19 17:52:36 +02006209 case HWTSTAMP_FILTER_NTP_ALL:
Jon Cooperbd9a2652013-11-18 12:54:41 +00006210 init->rx_filter = HWTSTAMP_FILTER_ALL;
6211 rc = efx_ptp_change_mode(efx, true, 0);
6212 if (!rc)
6213 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
6214 if (rc)
6215 efx_ptp_change_mode(efx, false, 0);
6216 return rc;
6217 default:
6218 return -ERANGE;
6219 }
6220}
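
/*
 * Editorial sketch (not part of the driver): a SIOCSHWTSTAMP request as
 * efx_ef10_ptp_set_ts_config() would see it.  Any PTP rx_filter collapses
 * to HWTSTAMP_FILTER_ALL on return, per the switch above; HWTSTAMP_TX_ON
 * is from the standard <linux/net_tstamp.h> UAPI.
 */
static int example_enable_hwtstamp(struct efx_nic *efx)
{
	struct hwtstamp_config init = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};

	return efx_ef10_ptp_set_ts_config(efx, &init);
}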
6221
Bert Kenward08a7b29b2017-01-10 16:23:33 +00006222static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
6223 struct netdev_phys_item_id *ppid)
6224{
6225 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6226
6227 if (!is_valid_ether_addr(nic_data->port_id))
6228 return -EOPNOTSUPP;
6229
6230 ppid->id_len = ETH_ALEN;
6231 memcpy(ppid->id, nic_data->port_id, ppid->id_len);
6232
6233 return 0;
6234}
6235
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006236static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6237{
6238 if (proto != htons(ETH_P_8021Q))
6239 return -EINVAL;
6240
6241 return efx_ef10_add_vlan(efx, vid);
6242}
6243
6244static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6245{
6246 if (proto != htons(ETH_P_8021Q))
6247 return -EINVAL;
6248
6249 return efx_ef10_del_vlan(efx, vid);
6250}
6251
Jon Coopere5fbd972017-02-08 16:52:10 +00006252/* We rely on the MCDI wiping out our TX rings if it made any changes to the
6253 * ports table, ensuring that any TSO descriptors that were made on a now-
6254 * removed tunnel port will be blown away and won't break things when we try
6255 * to transmit them using the new ports table.
6256 */
6257static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
6258{
6259 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6260 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
6261 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
6262 bool will_reset = false;
6263 size_t num_entries = 0;
6264 size_t inlen, outlen;
6265 size_t i;
6266 int rc;
6267 efx_dword_t flags_and_num_entries;
6268
6269 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
6270
6271 nic_data->udp_tunnels_dirty = false;
6272
6273 if (!(nic_data->datapath_caps &
6274 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
Peter Dunning9c568fd2017-02-17 15:50:43 +00006275 efx_device_attach_if_not_resetting(efx);
Jon Coopere5fbd972017-02-08 16:52:10 +00006276 return 0;
6277 }
6278
6279 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
6280 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
6281
6282 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6283 if (nic_data->udp_tunnels[i].count &&
6284 nic_data->udp_tunnels[i].port) {
6285 efx_dword_t entry;
6286
6287 EFX_POPULATE_DWORD_2(entry,
6288 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
6289 ntohs(nic_data->udp_tunnels[i].port),
6290 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
6291 nic_data->udp_tunnels[i].type);
6292 *_MCDI_ARRAY_DWORD(inbuf,
6293 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
6294 num_entries++) = entry;
6295 }
6296 }
6297
6298 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
6299 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
6300 EFX_WORD_1_LBN);
6301 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
6302 EFX_WORD_1_WIDTH);
6303 EFX_POPULATE_DWORD_2(flags_and_num_entries,
6304 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
6305 !!unloading,
6306 EFX_WORD_1, num_entries);
6307 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
6308 flags_and_num_entries;
6309
6310 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
6311
6312 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
6313 inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
6314 if (rc == -EIO) {
6315 /* Most likely the MC rebooted due to another function also
6316 * setting its tunnel port list. Mark the tunnel port list as
6317 * dirty, so it will be pushed upon coming up from the reboot.
6318 */
6319 nic_data->udp_tunnels_dirty = true;
6320 return 0;
6321 }
6322
6323 if (rc) {
6324 /* expected not available on unprivileged functions */
6325 if (rc != -EPERM)
6326 netif_warn(efx, drv, efx->net_dev,
6327 "Unable to set UDP tunnel ports; rc=%d.\n", rc);
6328 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
6329 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
6330 netif_info(efx, drv, efx->net_dev,
6331 "Rebooting MC due to UDP tunnel port list change\n");
6332 will_reset = true;
6333 if (unloading)
6334 /* Delay for the MC reset to complete. This will make
6335 * unloading other functions a bit smoother. This is a
6336 * race, but the other unload will work whichever way
6337 * it goes, this just avoids an unnecessary error
6338 * message.
6339 */
6340 msleep(100);
6341 }
6342 if (!will_reset && !unloading) {
6343 /* The caller will have detached, relying on the MC reset to
6344 * trigger a re-attach. Since there won't be an MC reset, we
6345 * have to do the attach ourselves.
6346 */
Peter Dunning9c568fd2017-02-17 15:50:43 +00006347 efx_device_attach_if_not_resetting(efx);
Jon Coopere5fbd972017-02-08 16:52:10 +00006348 }
6349
6350 return rc;
6351}
6352
6353static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
6354{
6355 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6356 int rc = 0;
6357
6358 mutex_lock(&nic_data->udp_tunnels_lock);
6359 if (nic_data->udp_tunnels_dirty) {
6360 /* Make sure all TX are stopped while we modify the table, else
6361 * we might race against an efx_features_check().
6362 */
6363 efx_device_detach_sync(efx);
6364 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6365 }
6366 mutex_unlock(&nic_data->udp_tunnels_lock);
6367 return rc;
6368}
6369
6370static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
6371 __be16 port)
6372{
6373 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6374 size_t i;
6375
6376 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6377 if (!nic_data->udp_tunnels[i].count)
6378 continue;
6379 if (nic_data->udp_tunnels[i].port == port)
6380 return &nic_data->udp_tunnels[i];
6381 }
6382 return NULL;
6383}
6384
6385static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
6386 struct efx_udp_tunnel tnl)
6387{
6388 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6389 struct efx_udp_tunnel *match;
6390 char typebuf[8];
6391 size_t i;
6392 int rc;
6393
6394 if (!(nic_data->datapath_caps &
6395 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6396 return 0;
6397
6398 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6399 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
6400 typebuf, ntohs(tnl.port));
6401
6402 mutex_lock(&nic_data->udp_tunnels_lock);
6403 /* Make sure all TX are stopped while we add to the table, else we
6404 * might race against an efx_features_check().
6405 */
6406 efx_device_detach_sync(efx);
6407
6408 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6409 if (match != NULL) {
6410 if (match->type == tnl.type) {
6411 netif_dbg(efx, drv, efx->net_dev,
6412 "Referencing existing tunnel entry\n");
6413 match->count++;
6414 /* No need to cause an MCDI update */
6415 rc = 0;
6416 goto unlock_out;
6417 }
6418 efx_get_udp_tunnel_type_name(match->type,
6419 typebuf, sizeof(typebuf));
6420 netif_dbg(efx, drv, efx->net_dev,
6421 "UDP port %d is already in use by %s\n",
6422 ntohs(tnl.port), typebuf);
6423 rc = -EEXIST;
6424 goto unlock_out;
6425 }
6426
6427 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
6428 if (!nic_data->udp_tunnels[i].count) {
6429 nic_data->udp_tunnels[i] = tnl;
6430 nic_data->udp_tunnels[i].count = 1;
6431 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6432 goto unlock_out;
6433 }
6434
6435 netif_dbg(efx, drv, efx->net_dev,
6436 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
6437 typebuf, ntohs(tnl.port));
6438
6439 rc = -ENOMEM;
6440
6441unlock_out:
6442 mutex_unlock(&nic_data->udp_tunnels_lock);
6443 return rc;
6444}
6445
6446/* Called under the TX lock with the TX queue running, hence no-one can be
6447 * in the middle of updating the UDP tunnels table. However, they could
6448 * have tried and failed the MCDI, in which case they'll have set the dirty
6449 * flag before dropping their locks.
6450 */
6451static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
6452{
6453 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6454
6455 if (!(nic_data->datapath_caps &
6456 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6457 return false;
6458
6459 if (nic_data->udp_tunnels_dirty)
6460 /* SW table may not match HW state, so just assume we can't
6461 * use any UDP tunnel offloads.
6462 */
6463 return false;
6464
6465 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
6466}
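
/*
 * Editorial sketch (not part of the driver): this lookup is what the core
 * presumably consults on the TX path to decide whether tunnel offloads
 * are safe for a given outer UDP destination port; the feature mask below
 * is illustrative only.
 */
static netdev_features_t example_check_tnl_features(struct efx_nic *efx,
						    __be16 dst_port,
						    netdev_features_t features)
{
	if (!efx_ef10_udp_tnl_has_port(efx, dst_port))
		features &= ~(NETIF_F_GSO_UDP_TUNNEL |
			      NETIF_F_GSO_UDP_TUNNEL_CSUM);
	return features;
}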
6467
6468static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
6469 struct efx_udp_tunnel tnl)
6470{
6471 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6472 struct efx_udp_tunnel *match;
6473 char typebuf[8];
6474 int rc;
6475
6476 if (!(nic_data->datapath_caps &
6477 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6478 return 0;
6479
6480 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6481 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
6482 typebuf, ntohs(tnl.port));
6483
6484 mutex_lock(&nic_data->udp_tunnels_lock);
6485 /* Make sure all TX are stopped while we remove from the table, else we
6486 * might race against an efx_features_check().
6487 */
6488 efx_device_detach_sync(efx);
6489
6490 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6491 if (match != NULL) {
6492 if (match->type == tnl.type) {
6493 if (--match->count) {
6494 /* Port is still in use, so nothing to do */
6495 netif_dbg(efx, drv, efx->net_dev,
6496 "UDP tunnel port %d remains active\n",
6497 ntohs(tnl.port));
6498 rc = 0;
6499 goto out_unlock;
6500 }
6501 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6502 goto out_unlock;
6503 }
6504 efx_get_udp_tunnel_type_name(match->type,
6505 typebuf, sizeof(typebuf));
6506 netif_warn(efx, drv, efx->net_dev,
6507 "UDP port %d is actually in use by %s, not removing\n",
6508 ntohs(tnl.port), typebuf);
6509 }
6510 rc = -ENOENT;
6511
6512out_unlock:
6513 mutex_unlock(&nic_data->udp_tunnels_lock);
6514 return rc;
6515}
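
/*
 * Editorial sketch (not part of the driver): add/del are refcounted per
 * (port, type) pair, so only the first add and the last del reach the MC.
 * The VXLAN protocol constant is assumed to come from the MCDI protocol
 * headers already used in this file.
 */
static void example_vxlan_port_cycle(struct efx_nic *efx, __be16 port)
{
	struct efx_udp_tunnel tnl = {
		.type = TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_VXLAN, /* assumed */
		.port = port,
	};

	efx_ef10_udp_tnl_add_port(efx, tnl);	/* count 0 -> 1: MCDI push */
	efx_ef10_udp_tnl_add_port(efx, tnl);	/* count 1 -> 2: no MCDI */
	efx_ef10_udp_tnl_del_port(efx, tnl);	/* count 2 -> 1: still offloaded */
	efx_ef10_udp_tnl_del_port(efx, tnl);	/* count 1 -> 0: MCDI push */
}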
6516
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006517#define EF10_OFFLOAD_FEATURES \
6518 (NETIF_F_IP_CSUM | \
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006519 NETIF_F_HW_VLAN_CTAG_FILTER | \
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006520 NETIF_F_IPV6_CSUM | \
6521 NETIF_F_RXHASH | \
6522 NETIF_F_NTUPLE)
6523
Shradha Shah02246a72015-05-06 00:58:14 +01006524const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
Shradha Shah6f7f8aa2015-05-06 01:00:07 +01006525 .is_vf = true,
Edward Cree03714bb2017-12-18 16:55:50 +00006526 .mem_bar = efx_ef10_vf_mem_bar,
Ben Hutchings8127d662013-08-29 19:19:29 +01006527 .mem_map_size = efx_ef10_mem_map_size,
Shradha Shah02246a72015-05-06 00:58:14 +01006528 .probe = efx_ef10_probe_vf,
6529 .remove = efx_ef10_remove,
6530 .dimension_resources = efx_ef10_dimension_resources,
6531 .init = efx_ef10_init_nic,
6532 .fini = efx_port_dummy_op_void,
Jon Cooper087e9022015-05-20 11:11:35 +01006533 .map_reset_reason = efx_ef10_map_reset_reason,
Shradha Shah02246a72015-05-06 00:58:14 +01006534 .map_reset_flags = efx_ef10_map_reset_flags,
6535 .reset = efx_ef10_reset,
6536 .probe_port = efx_mcdi_port_probe,
6537 .remove_port = efx_mcdi_port_remove,
6538 .fini_dmaq = efx_ef10_fini_dmaq,
6539 .prepare_flr = efx_ef10_prepare_flr,
6540 .finish_flr = efx_port_dummy_op_void,
6541 .describe_stats = efx_ef10_describe_stats,
Daniel Pieczkod7788192015-06-02 11:39:20 +01006542 .update_stats = efx_ef10_update_stats_vf,
Shradha Shah02246a72015-05-06 00:58:14 +01006543 .start_stats = efx_port_dummy_op_void,
6544 .pull_stats = efx_port_dummy_op_void,
6545 .stop_stats = efx_port_dummy_op_void,
6546 .set_id_led = efx_mcdi_set_id_led,
6547 .push_irq_moderation = efx_ef10_push_irq_moderation,
Shradha Shah862f8942015-05-20 11:08:56 +01006548 .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
Shradha Shah02246a72015-05-06 00:58:14 +01006549 .check_mac_fault = efx_mcdi_mac_check_fault,
6550 .reconfigure_port = efx_mcdi_port_reconfigure,
6551 .get_wol = efx_ef10_get_wol_vf,
6552 .set_wol = efx_ef10_set_wol_vf,
6553 .resume_wol = efx_port_dummy_op_void,
6554 .mcdi_request = efx_ef10_mcdi_request,
6555 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
6556 .mcdi_read_response = efx_ef10_mcdi_read_response,
6557 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
Daniel Pieczkoc577e592015-10-09 10:40:35 +01006558 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
Shradha Shah02246a72015-05-06 00:58:14 +01006559 .irq_enable_master = efx_port_dummy_op_void,
6560 .irq_test_generate = efx_ef10_irq_test_generate,
6561 .irq_disable_non_ev = efx_port_dummy_op_void,
6562 .irq_handle_msi = efx_ef10_msi_interrupt,
6563 .irq_handle_legacy = efx_ef10_legacy_interrupt,
6564 .tx_probe = efx_ef10_tx_probe,
6565 .tx_init = efx_ef10_tx_init,
6566 .tx_remove = efx_ef10_tx_remove,
6567 .tx_write = efx_ef10_tx_write,
Bert Kenwarde9117e52016-11-17 10:51:54 +00006568 .tx_limit_len = efx_ef10_tx_limit_len,
Jon Cooper267c0152015-05-06 00:59:38 +01006569 .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
Edward Creea707d182017-01-17 12:02:12 +00006570 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
Shradha Shah02246a72015-05-06 00:58:14 +01006571 .rx_probe = efx_ef10_rx_probe,
6572 .rx_init = efx_ef10_rx_init,
6573 .rx_remove = efx_ef10_rx_remove,
6574 .rx_write = efx_ef10_rx_write,
6575 .rx_defer_refill = efx_ef10_rx_defer_refill,
6576 .ev_probe = efx_ef10_ev_probe,
6577 .ev_init = efx_ef10_ev_init,
6578 .ev_fini = efx_ef10_ev_fini,
6579 .ev_remove = efx_ef10_ev_remove,
6580 .ev_process = efx_ef10_ev_process,
6581 .ev_read_ack = efx_ef10_ev_read_ack,
6582 .ev_test_generate = efx_ef10_ev_test_generate,
6583 .filter_table_probe = efx_ef10_filter_table_probe,
6584 .filter_table_restore = efx_ef10_filter_table_restore,
6585 .filter_table_remove = efx_ef10_filter_table_remove,
6586 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6587 .filter_insert = efx_ef10_filter_insert,
6588 .filter_remove_safe = efx_ef10_filter_remove_safe,
6589 .filter_get_safe = efx_ef10_filter_get_safe,
6590 .filter_clear_rx = efx_ef10_filter_clear_rx,
6591 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
6592 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6593 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6594#ifdef CONFIG_RFS_ACCEL
Shradha Shah02246a72015-05-06 00:58:14 +01006595 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6596#endif
6597#ifdef CONFIG_SFC_MTD
6598 .mtd_probe = efx_port_dummy_op_int,
6599#endif
6600 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
6601 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006602 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6603 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
Shradha Shah02246a72015-05-06 00:58:14 +01006604#ifdef CONFIG_SFC_SRIOV
Shradha Shah7b8c7b52015-05-06 00:58:54 +01006605 .vswitching_probe = efx_ef10_vswitching_probe_vf,
6606 .vswitching_restore = efx_ef10_vswitching_restore_vf,
6607 .vswitching_remove = efx_ef10_vswitching_remove_vf,
Shradha Shah02246a72015-05-06 00:58:14 +01006608#endif
Daniel Pieczko0d5e0fb2015-05-20 11:10:20 +01006609 .get_mac_address = efx_ef10_get_mac_address_vf,
Shradha Shah910c8782015-05-20 11:12:48 +01006610 .set_mac_address = efx_ef10_set_mac_address,
Daniel Pieczko0d5e0fb2015-05-20 11:10:20 +01006611
Bert Kenward08a7b29b2017-01-10 16:23:33 +00006612 .get_phys_port_id = efx_ef10_get_phys_port_id,
Shradha Shah02246a72015-05-06 00:58:14 +01006613 .revision = EFX_REV_HUNT_A0,
6614 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6615 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6616 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6617 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6618 .can_rx_scatter = true,
6619 .always_rx_scatter = true,
Andrew Rybchenko6f9f6ec2017-02-13 14:57:39 +00006620 .min_interrupt_mode = EFX_INT_MODE_MSIX,
Shradha Shah02246a72015-05-06 00:58:14 +01006621 .max_interrupt_mode = EFX_INT_MODE_MSIX,
6622 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006623 .offload_features = EF10_OFFLOAD_FEATURES,
Shradha Shah02246a72015-05-06 00:58:14 +01006624 .mcdi_max_ver = 2,
6625 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6626 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6627 1 << HWTSTAMP_FILTER_ALL,
Edward Creef74d1992017-01-17 12:01:53 +00006628 .rx_hash_key_size = 40,
Shradha Shah02246a72015-05-06 00:58:14 +01006629};
6630
6631const struct efx_nic_type efx_hunt_a0_nic_type = {
Shradha Shah6f7f8aa2015-05-06 01:00:07 +01006632 .is_vf = false,
Edward Cree03714bb2017-12-18 16:55:50 +00006633 .mem_bar = efx_ef10_pf_mem_bar,
Shradha Shah02246a72015-05-06 00:58:14 +01006634 .mem_map_size = efx_ef10_mem_map_size,
6635 .probe = efx_ef10_probe_pf,
Ben Hutchings8127d662013-08-29 19:19:29 +01006636 .remove = efx_ef10_remove,
6637 .dimension_resources = efx_ef10_dimension_resources,
6638 .init = efx_ef10_init_nic,
6639 .fini = efx_port_dummy_op_void,
Jon Cooper087e9022015-05-20 11:11:35 +01006640 .map_reset_reason = efx_ef10_map_reset_reason,
Ben Hutchings8127d662013-08-29 19:19:29 +01006641 .map_reset_flags = efx_ef10_map_reset_flags,
Jon Cooper3e336262014-01-17 19:48:06 +00006642 .reset = efx_ef10_reset,
Ben Hutchings8127d662013-08-29 19:19:29 +01006643 .probe_port = efx_mcdi_port_probe,
6644 .remove_port = efx_mcdi_port_remove,
6645 .fini_dmaq = efx_ef10_fini_dmaq,
Edward Creee2835462014-04-16 19:27:48 +01006646 .prepare_flr = efx_ef10_prepare_flr,
6647 .finish_flr = efx_port_dummy_op_void,
Ben Hutchings8127d662013-08-29 19:19:29 +01006648 .describe_stats = efx_ef10_describe_stats,
Daniel Pieczkod7788192015-06-02 11:39:20 +01006649 .update_stats = efx_ef10_update_stats_pf,
Ben Hutchings8127d662013-08-29 19:19:29 +01006650 .start_stats = efx_mcdi_mac_start_stats,
Jon Cooperf8f3b5a2013-09-30 17:36:50 +01006651 .pull_stats = efx_mcdi_mac_pull_stats,
Ben Hutchings8127d662013-08-29 19:19:29 +01006652 .stop_stats = efx_mcdi_mac_stop_stats,
6653 .set_id_led = efx_mcdi_set_id_led,
6654 .push_irq_moderation = efx_ef10_push_irq_moderation,
6655 .reconfigure_mac = efx_ef10_mac_reconfigure,
6656 .check_mac_fault = efx_mcdi_mac_check_fault,
6657 .reconfigure_port = efx_mcdi_port_reconfigure,
6658 .get_wol = efx_ef10_get_wol,
6659 .set_wol = efx_ef10_set_wol,
6660 .resume_wol = efx_port_dummy_op_void,
Jon Cooper74cd60a2013-09-16 14:18:51 +01006661 .test_chip = efx_ef10_test_chip,
Ben Hutchings8127d662013-08-29 19:19:29 +01006662 .test_nvram = efx_mcdi_nvram_test_all,
6663 .mcdi_request = efx_ef10_mcdi_request,
6664 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
6665 .mcdi_read_response = efx_ef10_mcdi_read_response,
6666 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
Daniel Pieczkoc577e592015-10-09 10:40:35 +01006667 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
Ben Hutchings8127d662013-08-29 19:19:29 +01006668 .irq_enable_master = efx_port_dummy_op_void,
6669 .irq_test_generate = efx_ef10_irq_test_generate,
6670 .irq_disable_non_ev = efx_port_dummy_op_void,
6671 .irq_handle_msi = efx_ef10_msi_interrupt,
6672 .irq_handle_legacy = efx_ef10_legacy_interrupt,
6673 .tx_probe = efx_ef10_tx_probe,
6674 .tx_init = efx_ef10_tx_init,
6675 .tx_remove = efx_ef10_tx_remove,
6676 .tx_write = efx_ef10_tx_write,
Bert Kenwarde9117e52016-11-17 10:51:54 +00006677 .tx_limit_len = efx_ef10_tx_limit_len,
Jon Cooper267c0152015-05-06 00:59:38 +01006678 .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
Edward Creea707d182017-01-17 12:02:12 +00006679 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
Edward Cree42356d92018-03-08 15:45:17 +00006680 .rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
6681 .rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
6682 .rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
Ben Hutchings8127d662013-08-29 19:19:29 +01006683 .rx_probe = efx_ef10_rx_probe,
6684 .rx_init = efx_ef10_rx_init,
6685 .rx_remove = efx_ef10_rx_remove,
6686 .rx_write = efx_ef10_rx_write,
6687 .rx_defer_refill = efx_ef10_rx_defer_refill,
6688 .ev_probe = efx_ef10_ev_probe,
6689 .ev_init = efx_ef10_ev_init,
6690 .ev_fini = efx_ef10_ev_fini,
6691 .ev_remove = efx_ef10_ev_remove,
6692 .ev_process = efx_ef10_ev_process,
6693 .ev_read_ack = efx_ef10_ev_read_ack,
6694 .ev_test_generate = efx_ef10_ev_test_generate,
6695 .filter_table_probe = efx_ef10_filter_table_probe,
6696 .filter_table_restore = efx_ef10_filter_table_restore,
6697 .filter_table_remove = efx_ef10_filter_table_remove,
6698 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6699 .filter_insert = efx_ef10_filter_insert,
6700 .filter_remove_safe = efx_ef10_filter_remove_safe,
6701 .filter_get_safe = efx_ef10_filter_get_safe,
6702 .filter_clear_rx = efx_ef10_filter_clear_rx,
6703 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
6704 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6705 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6706#ifdef CONFIG_RFS_ACCEL
Ben Hutchings8127d662013-08-29 19:19:29 +01006707 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6708#endif
6709#ifdef CONFIG_SFC_MTD
6710 .mtd_probe = efx_ef10_mtd_probe,
6711 .mtd_rename = efx_mcdi_mtd_rename,
6712 .mtd_read = efx_mcdi_mtd_read,
6713 .mtd_erase = efx_mcdi_mtd_erase,
6714 .mtd_write = efx_mcdi_mtd_write,
6715 .mtd_sync = efx_mcdi_mtd_sync,
6716#endif
6717 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
Jon Cooperbd9a2652013-11-18 12:54:41 +00006718 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
6719 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006720 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6721 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
Jon Coopere5fbd972017-02-08 16:52:10 +00006722 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
6723 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
6724 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
6725 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
Shradha Shah7fa8d542015-05-06 00:55:13 +01006726#ifdef CONFIG_SFC_SRIOV
Shradha Shah834e23d2015-05-06 00:55:58 +01006727 .sriov_configure = efx_ef10_sriov_configure,
Shradha Shahd98a4ff2014-11-05 12:16:46 +00006728 .sriov_init = efx_ef10_sriov_init,
6729 .sriov_fini = efx_ef10_sriov_fini,
Shradha Shahd98a4ff2014-11-05 12:16:46 +00006730 .sriov_wanted = efx_ef10_sriov_wanted,
6731 .sriov_reset = efx_ef10_sriov_reset,
Shradha Shah7fa8d542015-05-06 00:55:13 +01006732 .sriov_flr = efx_ef10_sriov_flr,
6733 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
6734 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
6735 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
6736 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
Edward Cree4392dc62015-05-20 11:12:13 +01006737 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
Shradha Shah7b8c7b52015-05-06 00:58:54 +01006738 .vswitching_probe = efx_ef10_vswitching_probe_pf,
6739 .vswitching_restore = efx_ef10_vswitching_restore_pf,
6740 .vswitching_remove = efx_ef10_vswitching_remove_pf,
Shradha Shah7fa8d542015-05-06 00:55:13 +01006741#endif
Daniel Pieczko0d5e0fb2015-05-20 11:10:20 +01006742 .get_mac_address = efx_ef10_get_mac_address_pf,
Shradha Shah910c8782015-05-20 11:12:48 +01006743 .set_mac_address = efx_ef10_set_mac_address,
Edward Cree46d1efd2016-11-17 10:52:36 +00006744 .tso_versions = efx_ef10_tso_versions,
Ben Hutchings8127d662013-08-29 19:19:29 +01006745
Bert Kenward08a7b29b2017-01-10 16:23:33 +00006746 .get_phys_port_id = efx_ef10_get_phys_port_id,
Ben Hutchings8127d662013-08-29 19:19:29 +01006747 .revision = EFX_REV_HUNT_A0,
6748 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6749 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6750 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
Jon Cooperbd9a2652013-11-18 12:54:41 +00006751 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
Ben Hutchings8127d662013-08-29 19:19:29 +01006752 .can_rx_scatter = true,
6753 .always_rx_scatter = true,
Edward Creede1deff2017-01-13 21:20:14 +00006754 .option_descriptors = true,
Andrew Rybchenko6f9f6ec2017-02-13 14:57:39 +00006755 .min_interrupt_mode = EFX_INT_MODE_LEGACY,
Ben Hutchings8127d662013-08-29 19:19:29 +01006756 .max_interrupt_mode = EFX_INT_MODE_MSIX,
6757 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006758 .offload_features = EF10_OFFLOAD_FEATURES,
Ben Hutchings8127d662013-08-29 19:19:29 +01006759 .mcdi_max_ver = 2,
6760 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
Jon Cooperbd9a2652013-11-18 12:54:41 +00006761 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6762 1 << HWTSTAMP_FILTER_ALL,
Edward Creef74d1992017-01-17 12:01:53 +00006763 .rx_hash_key_size = 40,
Ben Hutchings8127d662013-08-29 19:19:29 +01006764};