/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access. When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter. Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

enum efx_ef10_default_filters {
	EFX_EF10_BCAST,
	EFX_EF10_UCDEF,
	EFX_EF10_MCDEF,
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS
};

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_ef10_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;

	struct rw_semaphore lock; /* Protects entries */
	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* AUTO_OLD is used to mark and sweep MAC filters for the device address lists. */
/* unused flag	1UL */
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
	bool vlan_filter;
	struct list_head vlan_list;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);

static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
{
	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
}

static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
{
	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
}

static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
{
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
}
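
/* Worked example (illustrative, not part of the original file): with
 * HUNT_FILTER_TBL_ROWS == 8192, efx_ef10_make_filter_id(1, 5) yields
 * 16389; efx_ef10_filter_get_unsafe_pri(16389) then recovers priority 1
 * and efx_ef10_filter_get_unsafe_id(16389) recovers table index 5.
 */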

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
	switch (efx->pci_dev->device) {
	case 0x0b03: /* SFC9250 PF */
		return 0;
	default:
		return 2;
	}
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
	return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar(efx);
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
		nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
				GET_CAPABILITIES_V2_OUT_FLAGS2);
		nic_data->piobuf_size = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
	} else {
		nic_data->datapath_caps2 = 0;
		nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
	}

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
		u8 vi_window_mode = MCDI_BYTE(outbuf,
				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

		switch (vi_window_mode) {
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
			efx->vi_stride = 8192;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
			efx->vi_stride = 16384;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
			efx->vi_stride = 65536;
			break;
		default:
			netif_err(efx, probe, efx->net_dev,
				  "Unrecognised VI window mode %d\n",
				  vi_window_mode);
			return -EIO;
		}
		netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
			  efx->vi_stride);
	} else {
		/* keep default VI stride */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
			  efx->vi_stride);
	}

	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		efx->num_mac_stats = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware reports num_mac_stats = %u\n",
			  efx->num_mac_stats);
	} else {
		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
		netif_dbg(efx, probe, efx->net_dev,
			  "firmware did not report num_mac_stats, assuming %u\n",
			  efx->num_mac_stats);
	}

	return 0;
}

static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
		return;

	nic_data->licensed_features = MCDI_QWORD(outbuf,
					 LICENSING_V3_OUT_LICENSED_FEATURES);
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int implemented;
	unsigned int enabled;
	int rc;

	nic_data->workaround_35388 = false;
	nic_data->workaround_61265 = false;

	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

	if (rc == -ENOSYS) {
		/* Firmware without GET_WORKAROUNDS - not a problem. */
		rc = 0;
	} else if (rc == 0) {
		/* Bug61265 workaround is always enabled if implemented. */
		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
			nic_data->workaround_61265 = true;

		if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			nic_data->workaround_35388 = true;
		} else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
			/* Workaround is implemented but not enabled.
			 * Try to enable it.
			 */
			rc = efx_mcdi_set_workaround(efx,
						     MC_CMD_WORKAROUND_BUG35388,
						     true, NULL);
			if (rc == 0)
				nic_data->workaround_35388 = true;
			/* If we failed to set the workaround just carry on. */
			rc = 0;
		}
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 61265 is %sabled\n",
		  nic_data->workaround_61265 ? "en" : "dis");

	return rc;
}

static void efx_ef10_process_timer_config(struct efx_nic *efx,
					  const efx_dword_t *data)
{
	unsigned int max_count;

	if (EFX_EF10_WORKAROUND_61265(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
		efx->timer_max_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
	} else if (EFX_EF10_WORKAROUND_35388(efx)) {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	} else {
		efx->timer_quantum_ns = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
		max_count = MCDI_DWORD(data,
			GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
		efx->timer_max_ns = max_count * efx->timer_quantum_ns;
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "got timer properties from MC: quantum %u ns; max %u ns\n",
		  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
	int rc;

	rc = efx_ef10_get_timer_workarounds(efx);
	if (rc)
		return rc;

	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
				outbuf, sizeof(outbuf), NULL);

	if (rc == 0) {
		efx_ef10_process_timer_config(efx, outbuf);
	} else if (rc == -ENOSYS || rc == -EPERM) {
		/* Not available - fall back to Huntington defaults. */
		unsigned int quantum;

		rc = efx_ef10_get_sysclk_freq(efx);
		if (rc < 0)
			return rc;

		quantum = 1536000 / rc; /* 1536 cycles */
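		/* Illustrative arithmetic (example frequency, not a spec value):
		 * a sysclk of 200 MHz would give a quantum of
		 * 1536000 / 200 = 7680 ns.
		 */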
		efx->timer_quantum_ns = quantum;
		efx->timer_max_ns = efx->type->timer_period_max * quantum;
		rc = 0;
	} else {
		efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
				       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
				       NULL, 0, rc);
	}

	return rc;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init. 8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count. In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx_ef10_read_licensed_features(efx);

	/* We can have one VI for each vi_stride-byte region.
	 * However, until we use TX option descriptors we need two TX queues
	 * per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (efx->vi_stride * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0)) {
		rc = -EIO;
		goto fail5;
	}

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
		efx->net_dev->hw_features |= NETIF_F_RXFCS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic. It is added automatically if the 8021q module is loaded,
	 * but we can't rely on that since the module may not be loaded.
	 */
	rc = efx_ef10_add_vlan(efx, 0);
	if (rc)
		goto fail_add_vid_0;

	return 0;

fail_add_vid_0:
	efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
	mutex_destroy(&nic_data->vlan_lock);
	efx_ptp_remove(efx);
	efx_mcdi_mon_remove(efx);
fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_detach(efx);

	mutex_lock(&nic_data->udp_tunnels_lock);
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);
	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		/* Extra channels, even those with TXQs (PTP), do not require
		 * PIO resources.
		 */
		if (!channel->type->want_pio)
			continue;
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * efx->vi_stride + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	/* inbuf was defined for MC_CMD_LINK_PIOBUF.  We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf)) {
			rc = -EIO;
			return rc;
		}
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe. The VF needs to be re-created. This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis = max(EFX_TXQ_TYPES,
				   efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels,
			  (efx->n_tx_channels + efx->n_extra_tx_channels) *
			  EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc == -ENOSPC)
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
		else if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first 4K of the next VI.  Then the WC mapping begins with
	 * the remainder of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      efx->vi_stride) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / EFX_TXQ_TYPES;

		efx_ef10_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal.
		 * Permission errors are less important - we've presumably
		 * had the PIO buffer licence removed.
		 */
		if (rc == -EPERM)
			netif_dbg(efx, drv, efx->net_dev,
				  "not permitted to restore PIO buffers\n");
		else if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false,
					   efx->rss_context.rx_indir_table, NULL);

	return 0;
}

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_rss_contexts = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
Ben Hutchings8127d662013-08-29 19:19:29 +01001588
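/* Worked expansion (illustrative only): EF10_DMA_STAT(port_tx_bytes, TX_BYTES)
 * in the table below becomes roughly
 *	[EF10_STAT_port_tx_bytes] = { "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES },
 * i.e. a named 64-bit counter whose byte offset in the DMAed statistics
 * buffer is derived from the MCDI statistic index, whereas EF10_OTHER_STAT()
 * entries have no DMA source and are derived by the driver.
 */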
1589static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001590 EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
1591 EF10_DMA_STAT(port_tx_packets, TX_PKTS),
1592 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
1593 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
1594 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
1595 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
1596 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
1597 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
1598 EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
1599 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
1600 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
1601 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
1602 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
1603 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
1604 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
1605 EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
1606 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
1607 EF10_OTHER_STAT(port_rx_good_bytes),
1608 EF10_OTHER_STAT(port_rx_bad_bytes),
1609 EF10_DMA_STAT(port_rx_packets, RX_PKTS),
1610 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
1611 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
1612 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
1613 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
1614 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
1615 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
1616 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
1617 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
1618 EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
1619 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
1620 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
1621 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
1622 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
1623 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
1624 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
1625 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
1626 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
1627 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
1628 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
1629 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
1630 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
Edward Creee4d112e2014-07-15 11:58:12 +01001631 GENERIC_SW_STAT(rx_nodesc_trunc),
1632 GENERIC_SW_STAT(rx_noskb_drops),
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001633 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
1634 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
1635 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
1636 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
1637 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
1638 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
1639 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
1640 EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
1641 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
1642 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
1643 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
1644 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001645 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
1646 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
1647 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
1648 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
1649 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
1650 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
1651 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
1652 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
1653 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
1654 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
1655 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
1656 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1657 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
1658 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
1659 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
1660 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
1661 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
1662 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
Edward Creef411b542017-12-21 09:00:36 +00001663 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
1664 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
1665 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
1666 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
1667 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
1668 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
Bert Kenward2c0b6ee2017-12-21 09:00:41 +00001669 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
1670 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
1671 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
1672 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
1673 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
1674 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
1675 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
1676 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
1677 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
1678 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
1679 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
1680 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
1681 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
1682 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
1683 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
1684 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
Ben Hutchings8127d662013-08-29 19:19:29 +01001685};
1686
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001687#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
1688 (1ULL << EF10_STAT_port_tx_packets) | \
1689 (1ULL << EF10_STAT_port_tx_pause) | \
1690 (1ULL << EF10_STAT_port_tx_unicast) | \
1691 (1ULL << EF10_STAT_port_tx_multicast) | \
1692 (1ULL << EF10_STAT_port_tx_broadcast) | \
1693 (1ULL << EF10_STAT_port_rx_bytes) | \
1694 (1ULL << \
1695 EF10_STAT_port_rx_bytes_minus_good_bytes) | \
1696 (1ULL << EF10_STAT_port_rx_good_bytes) | \
1697 (1ULL << EF10_STAT_port_rx_bad_bytes) | \
1698 (1ULL << EF10_STAT_port_rx_packets) | \
1699 (1ULL << EF10_STAT_port_rx_good) | \
1700 (1ULL << EF10_STAT_port_rx_bad) | \
1701 (1ULL << EF10_STAT_port_rx_pause) | \
1702 (1ULL << EF10_STAT_port_rx_control) | \
1703 (1ULL << EF10_STAT_port_rx_unicast) | \
1704 (1ULL << EF10_STAT_port_rx_multicast) | \
1705 (1ULL << EF10_STAT_port_rx_broadcast) | \
1706 (1ULL << EF10_STAT_port_rx_lt64) | \
1707 (1ULL << EF10_STAT_port_rx_64) | \
1708 (1ULL << EF10_STAT_port_rx_65_to_127) | \
1709 (1ULL << EF10_STAT_port_rx_128_to_255) | \
1710 (1ULL << EF10_STAT_port_rx_256_to_511) | \
1711 (1ULL << EF10_STAT_port_rx_512_to_1023) |\
1712 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
1713 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
1714 (1ULL << EF10_STAT_port_rx_gtjumbo) | \
1715 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
1716 (1ULL << EF10_STAT_port_rx_overflow) | \
1717 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
Edward Creee4d112e2014-07-15 11:58:12 +01001718 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
1719 (1ULL << GENERIC_STAT_rx_noskb_drops))
Ben Hutchings8127d662013-08-29 19:19:29 +01001720
Edward Cree69b365c2016-08-26 15:12:41 +01001721/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
1722 * For a 10G/40G switchable port we do not expose these because they might
1723 * not include all the packets they should.
1724 * On 8000 series NICs these statistics are always provided.
Ben Hutchings8127d662013-08-29 19:19:29 +01001725 */
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001726#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
1727 (1ULL << EF10_STAT_port_tx_lt64) | \
1728 (1ULL << EF10_STAT_port_tx_64) | \
1729 (1ULL << EF10_STAT_port_tx_65_to_127) |\
1730 (1ULL << EF10_STAT_port_tx_128_to_255) |\
1731 (1ULL << EF10_STAT_port_tx_256_to_511) |\
1732 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
1733 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
1734 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
Ben Hutchings8127d662013-08-29 19:19:29 +01001735
1736/* These statistics are only provided by the 40G MAC. For a 10G/40G
1737 * switchable port we do expose these because the errors will otherwise
1738 * be silent.
1739 */
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001740#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
1741 (1ULL << EF10_STAT_port_rx_length_error))
Ben Hutchings8127d662013-08-29 19:19:29 +01001742
Edward Cree568d7a02013-09-25 17:32:09 +01001743/* These statistics are only provided if the firmware supports the
1744 * capability PM_AND_RXDP_COUNTERS.
1745 */
1746#define HUNT_PM_AND_RXDP_STAT_MASK ( \
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001747 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
1748 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
1749 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
1750 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
1751 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
1752 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
1753 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
1754 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
1755 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
1756 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
1757 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
1758 (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
Ben Hutchings8127d662013-08-29 19:19:29 +01001759
Edward Creef411b542017-12-21 09:00:36 +00001760/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
1761 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
1762 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1763 * These bits are in the second u64 of the raw mask.
1764 */
1765#define EF10_FEC_STAT_MASK ( \
1766 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
1767 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
1768 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
1769 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
1770 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
1771 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
1772
Bert Kenward2c0b6ee2017-12-21 09:00:41 +00001773/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
1774 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
1775 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1776 * These bits are in the second u64 of the raw mask.
1777 */
1778#define EF10_CTPIO_STAT_MASK ( \
Bert Kenward2c0b6ee2017-12-21 09:00:41 +00001779 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
1780 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
1781 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
1782 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
1783 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
1784 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
1785 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
1786 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
1787 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
1788 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
1789 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
1790 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
1791 (1ULL << (EF10_STAT_ctpio_success - 64)) | \
1792 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
1793 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \
1794 (1ULL << (EF10_STAT_ctpio_erase - 64)))
1795
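/* Note (added for clarity): the FEC and CTPIO statistics have enum values of
 * 64 or more, so the "(EF10_STAT_xxx - 64)" shifts above place their flags in
 * the second 64-bit word of the raw mask. efx_ef10_get_stat_mask() below
 * stores that word in raw_mask[1] and, on 32-bit builds, its low 32 bits
 * become the third unsigned long of the bitmap.
 */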
Edward Cree4bae9132013-09-27 18:52:49 +01001796static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01001797{
Edward Cree4bae9132013-09-27 18:52:49 +01001798 u64 raw_mask = HUNT_COMMON_STAT_MASK;
Ben Hutchings8127d662013-08-29 19:19:29 +01001799 u32 port_caps = efx_mcdi_phy_get_caps(efx);
Edward Cree568d7a02013-09-25 17:32:09 +01001800 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01001801
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001802 if (!(efx->mcdi->fn_flags &
1803 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1804 return 0;
1805
Edward Cree69b365c2016-08-26 15:12:41 +01001806 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
Edward Cree4bae9132013-09-27 18:52:49 +01001807 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
Edward Cree69b365c2016-08-26 15:12:41 +01001808 /* 8000 series have everything even at 40G */
1809 if (nic_data->datapath_caps2 &
1810 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
1811 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1812 } else {
Edward Cree4bae9132013-09-27 18:52:49 +01001813 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
Edward Cree69b365c2016-08-26 15:12:41 +01001814 }
Edward Cree568d7a02013-09-25 17:32:09 +01001815
1816 if (nic_data->datapath_caps &
1817 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1818 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1819
Edward Cree4bae9132013-09-27 18:52:49 +01001820 return raw_mask;
1821}
1822
1823static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1824{
Daniel Pieczkod94619c2015-06-02 11:40:05 +01001825 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001826 u64 raw_mask[2];
1827
1828 raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1829
Daniel Pieczkod94619c2015-06-02 11:40:05 +01001830 /* Only show vadaptor stats when EVB capability is present */
1831 if (nic_data->datapath_caps &
1832 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1833 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
Edward Creef411b542017-12-21 09:00:36 +00001834 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
Daniel Pieczkod94619c2015-06-02 11:40:05 +01001835 } else {
1836 raw_mask[1] = 0;
1837 }
Edward Creef411b542017-12-21 09:00:36 +00001838 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
1839 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
1840 raw_mask[1] |= EF10_FEC_STAT_MASK;
Edward Cree4bae9132013-09-27 18:52:49 +01001841
Bert Kenward2c0b6ee2017-12-21 09:00:41 +00001842 /* CTPIO stats appear in V3. Only show them on devices that actually
 1843	 * support CTPIO. Although this driver doesn't use CTPIO, others might,
1844 * and we may be reporting the stats for the underlying port.
1845 */
1846 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
1847 (nic_data->datapath_caps2 &
1848 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
1849 raw_mask[1] |= EF10_CTPIO_STAT_MASK;
1850
Edward Cree4bae9132013-09-27 18:52:49 +01001851#if BITS_PER_LONG == 64
Andrew Rybchenkoe70c70c32016-08-26 11:19:34 +01001852 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001853 mask[0] = raw_mask[0];
1854 mask[1] = raw_mask[1];
Edward Cree4bae9132013-09-27 18:52:49 +01001855#else
Andrew Rybchenkoe70c70c32016-08-26 11:19:34 +01001856 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
Daniel Pieczko3c36a2a2015-06-02 11:39:06 +01001857 mask[0] = raw_mask[0] & 0xffffffff;
1858 mask[1] = raw_mask[0] >> 32;
1859 mask[2] = raw_mask[1] & 0xffffffff;
Edward Cree4bae9132013-09-27 18:52:49 +01001860#endif
Ben Hutchings8127d662013-08-29 19:19:29 +01001861}
1862
1863static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1864{
Edward Cree4bae9132013-09-27 18:52:49 +01001865 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1866
1867 efx_ef10_get_stat_mask(efx, mask);
Ben Hutchings8127d662013-08-29 19:19:29 +01001868 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
Edward Cree4bae9132013-09-27 18:52:49 +01001869 mask, names);
Ben Hutchings8127d662013-08-29 19:19:29 +01001870}
1871
Daniel Pieczkod7788192015-06-02 11:39:20 +01001872static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
1873 struct rtnl_link_stats64 *core_stats)
1874{
1875 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1876 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1877 u64 *stats = nic_data->stats;
1878 size_t stats_count = 0, index;
1879
1880 efx_ef10_get_stat_mask(efx, mask);
1881
1882 if (full_stats) {
1883 for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1884 if (efx_ef10_stat_desc[index].name) {
1885 *full_stats++ = stats[index];
1886 ++stats_count;
1887 }
1888 }
1889 }
1890
Bert Kenwardfbe43072015-08-26 16:39:03 +01001891 if (!core_stats)
1892 return stats_count;
1893
1894 if (nic_data->datapath_caps &
1895 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1896 /* Use vadaptor stats. */
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01001897 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1898 stats[EF10_STAT_rx_multicast] +
1899 stats[EF10_STAT_rx_broadcast];
1900 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1901 stats[EF10_STAT_tx_multicast] +
1902 stats[EF10_STAT_tx_broadcast];
1903 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1904 stats[EF10_STAT_rx_multicast_bytes] +
1905 stats[EF10_STAT_rx_broadcast_bytes];
1906 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1907 stats[EF10_STAT_tx_multicast_bytes] +
1908 stats[EF10_STAT_tx_broadcast_bytes];
1909 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
Daniel Pieczkod7788192015-06-02 11:39:20 +01001910 stats[GENERIC_STAT_rx_noskb_drops];
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01001911 core_stats->multicast = stats[EF10_STAT_rx_multicast];
1912 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1913 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1914 core_stats->rx_errors = core_stats->rx_crc_errors;
1915 core_stats->tx_errors = stats[EF10_STAT_tx_bad];
Bert Kenwardfbe43072015-08-26 16:39:03 +01001916 } else {
1917 /* Use port stats. */
1918 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1919 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1920 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1921 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1922 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1923 stats[GENERIC_STAT_rx_nodesc_trunc] +
1924 stats[GENERIC_STAT_rx_noskb_drops];
1925 core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1926 core_stats->rx_length_errors =
1927 stats[EF10_STAT_port_rx_gtjumbo] +
1928 stats[EF10_STAT_port_rx_length_error];
1929 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1930 core_stats->rx_frame_errors =
1931 stats[EF10_STAT_port_rx_align_error];
1932 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1933 core_stats->rx_errors = (core_stats->rx_length_errors +
1934 core_stats->rx_crc_errors +
1935 core_stats->rx_frame_errors);
Daniel Pieczkod7788192015-06-02 11:39:20 +01001936 }
1937
1938 return stats_count;
1939}
1940
1941static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01001942{
1943 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Edward Cree4bae9132013-09-27 18:52:49 +01001944 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
Ben Hutchings8127d662013-08-29 19:19:29 +01001945 __le64 generation_start, generation_end;
1946 u64 *stats = nic_data->stats;
1947 __le64 *dma_stats;
1948
Edward Cree4bae9132013-09-27 18:52:49 +01001949 efx_ef10_get_stat_mask(efx, mask);
1950
Ben Hutchings8127d662013-08-29 19:19:29 +01001951 dma_stats = efx->stats_buffer.addr;
Ben Hutchings8127d662013-08-29 19:19:29 +01001952
Edward Creec1be4822017-12-21 09:00:26 +00001953 generation_end = dma_stats[efx->num_mac_stats - 1];
Ben Hutchings8127d662013-08-29 19:19:29 +01001954 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1955 return 0;
1956 rmb();
Edward Cree4bae9132013-09-27 18:52:49 +01001957 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
Ben Hutchings8127d662013-08-29 19:19:29 +01001958 stats, efx->stats_buffer.addr, false);
Jon Cooperd546a892013-09-27 18:26:30 +01001959 rmb();
Ben Hutchings8127d662013-08-29 19:19:29 +01001960 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1961 if (generation_end != generation_start)
1962 return -EAGAIN;
1963
1964 /* Update derived statistics */
Daniel Pieczkoe80ca0132015-06-02 11:38:34 +01001965 efx_nic_fix_nodesc_drop_stat(efx,
1966 &stats[EF10_STAT_port_rx_nodesc_drops]);
1967 stats[EF10_STAT_port_rx_good_bytes] =
1968 stats[EF10_STAT_port_rx_bytes] -
1969 stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1970 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1971 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
Edward Creee4d112e2014-07-15 11:58:12 +01001972 efx_update_sw_stats(efx, stats);
Ben Hutchings8127d662013-08-29 19:19:29 +01001973 return 0;
1974}
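/* Added note: the generation words bracket the DMAed statistics block.
 * efx_ef10_try_update_nic_stats_pf() reads the generation count at the end
 * of the buffer first and the MC_CMD_MAC_GENERATION_START word last; if the
 * two differ, the MC was part-way through a DMA and the caller in
 * efx_ef10_update_stats_pf() retries (up to ~10ms in 100us steps).
 */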
1975
1976
Daniel Pieczkod7788192015-06-02 11:39:20 +01001977static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1978 struct rtnl_link_stats64 *core_stats)
Ben Hutchings8127d662013-08-29 19:19:29 +01001979{
Ben Hutchings8127d662013-08-29 19:19:29 +01001980 int retry;
1981
1982 /* If we're unlucky enough to read statistics during the DMA, wait
1983 * up to 10ms for it to finish (typically takes <500us)
1984 */
1985 for (retry = 0; retry < 100; ++retry) {
Daniel Pieczkod7788192015-06-02 11:39:20 +01001986 if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
Ben Hutchings8127d662013-08-29 19:19:29 +01001987 break;
1988 udelay(100);
1989 }
1990
Daniel Pieczkod7788192015-06-02 11:39:20 +01001991 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1992}
1993
1994static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1995{
1996 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1997 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1998 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1999 __le64 generation_start, generation_end;
2000 u64 *stats = nic_data->stats;
Edward Creec1be4822017-12-21 09:00:26 +00002001 u32 dma_len = efx->num_mac_stats * sizeof(u64);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002002 struct efx_buffer stats_buf;
2003 __le64 *dma_stats;
2004 int rc;
2005
Daniel Pieczkof00bf232015-06-02 11:40:18 +01002006 spin_unlock_bh(&efx->stats_lock);
2007
2008 if (in_interrupt()) {
2009 /* If in atomic context, cannot update stats. Just update the
2010 * software stats and return so the caller can continue.
2011 */
2012 spin_lock_bh(&efx->stats_lock);
2013 efx_update_sw_stats(efx, stats);
2014 return 0;
2015 }
2016
Daniel Pieczkod7788192015-06-02 11:39:20 +01002017 efx_ef10_get_stat_mask(efx, mask);
2018
2019 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
Daniel Pieczkof00bf232015-06-02 11:40:18 +01002020 if (rc) {
2021 spin_lock_bh(&efx->stats_lock);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002022 return rc;
Daniel Pieczkof00bf232015-06-02 11:40:18 +01002023 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01002024
2025 dma_stats = stats_buf.addr;
Edward Creec1be4822017-12-21 09:00:26 +00002026 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
Daniel Pieczkod7788192015-06-02 11:39:20 +01002027
2028 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
2029 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01002030 MAC_STATS_IN_DMA, 1);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002031 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
2032 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
2033
Daniel Pieczko6dd48592015-06-02 11:39:49 +01002034 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
2035 NULL, 0, NULL);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002036 spin_lock_bh(&efx->stats_lock);
Daniel Pieczko6dd48592015-06-02 11:39:49 +01002037 if (rc) {
2038 /* Expect ENOENT if DMA queues have not been set up */
2039 if (rc != -ENOENT || atomic_read(&efx->active_queues))
2040 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
2041 sizeof(inbuf), NULL, 0, rc);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002042 goto out;
Daniel Pieczko6dd48592015-06-02 11:39:49 +01002043 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01002044
Edward Creec1be4822017-12-21 09:00:26 +00002045 generation_end = dma_stats[efx->num_mac_stats - 1];
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01002046 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
2047 WARN_ON_ONCE(1);
Daniel Pieczkod7788192015-06-02 11:39:20 +01002048 goto out;
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01002049 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01002050 rmb();
2051 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
2052 stats, stats_buf.addr, false);
2053 rmb();
2054 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
2055 if (generation_end != generation_start) {
2056 rc = -EAGAIN;
2057 goto out;
Ben Hutchings8127d662013-08-29 19:19:29 +01002058 }
2059
Daniel Pieczkod7788192015-06-02 11:39:20 +01002060 efx_update_sw_stats(efx, stats);
2061out:
2062 efx_nic_free_buffer(efx, &stats_buf);
2063 return rc;
2064}
Ben Hutchings8127d662013-08-29 19:19:29 +01002065
Daniel Pieczkod7788192015-06-02 11:39:20 +01002066static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
2067 struct rtnl_link_stats64 *core_stats)
2068{
2069 if (efx_ef10_try_update_nic_stats_vf(efx))
2070 return 0;
2071
2072 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
Ben Hutchings8127d662013-08-29 19:19:29 +01002073}
2074
2075static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
2076{
2077 struct efx_nic *efx = channel->efx;
Bert Kenward539de7c2016-08-11 13:02:09 +01002078 unsigned int mode, usecs;
Ben Hutchings8127d662013-08-29 19:19:29 +01002079 efx_dword_t timer_cmd;
2080
Bert Kenward539de7c2016-08-11 13:02:09 +01002081 if (channel->irq_moderation_us) {
Ben Hutchings8127d662013-08-29 19:19:29 +01002082 mode = 3;
Bert Kenward539de7c2016-08-11 13:02:09 +01002083 usecs = channel->irq_moderation_us;
Ben Hutchings8127d662013-08-29 19:19:29 +01002084 } else {
2085 mode = 0;
Bert Kenward539de7c2016-08-11 13:02:09 +01002086 usecs = 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01002087 }
2088
Bert Kenward539de7c2016-08-11 13:02:09 +01002089 if (EFX_EF10_WORKAROUND_61265(efx)) {
2090 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
2091 unsigned int ns = usecs * 1000;
2092
2093 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
2094 channel->channel);
2095 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
2096 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
2097 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
2098
2099 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
2100 inbuf, sizeof(inbuf), 0, NULL, 0);
2101 } else if (EFX_EF10_WORKAROUND_35388(efx)) {
2102 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2103
Ben Hutchings8127d662013-08-29 19:19:29 +01002104 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
2105 EFE_DD_EVQ_IND_TIMER_FLAGS,
2106 ERF_DD_EVQ_IND_TIMER_MODE, mode,
Bert Kenward539de7c2016-08-11 13:02:09 +01002107 ERF_DD_EVQ_IND_TIMER_VAL, ticks);
Ben Hutchings8127d662013-08-29 19:19:29 +01002108 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
2109 channel->channel);
2110 } else {
Bert Kenward539de7c2016-08-11 13:02:09 +01002111 unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
2112
Bert Kenward0bc959a2017-12-18 16:57:41 +00002113 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
2114 ERF_DZ_TC_TIMER_VAL, ticks,
2115 ERF_FZ_TC_TMR_REL_VAL, ticks);
Ben Hutchings8127d662013-08-29 19:19:29 +01002116 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
2117 channel->channel);
2118 }
2119}
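/* Worked example (illustrative, not from the original source): with
 * channel->irq_moderation_us = 30, the MCDI path used when workaround 61265
 * is active programs a 30000ns load/reload value; the other two paths
 * convert the same 30us to timer ticks with efx_usecs_to_ticks(), which
 * scales by the NIC's timer quantum.
 */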
2120
Shradha Shah02246a72015-05-06 00:58:14 +01002121static void efx_ef10_get_wol_vf(struct efx_nic *efx,
2122 struct ethtool_wolinfo *wol) {}
2123
2124static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
2125{
2126 return -EOPNOTSUPP;
2127}
2128
Ben Hutchings8127d662013-08-29 19:19:29 +01002129static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2130{
2131 wol->supported = 0;
2132 wol->wolopts = 0;
2133 memset(&wol->sopass, 0, sizeof(wol->sopass));
2134}
2135
2136static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
2137{
2138 if (type != 0)
2139 return -EINVAL;
2140 return 0;
2141}
2142
2143static void efx_ef10_mcdi_request(struct efx_nic *efx,
2144 const efx_dword_t *hdr, size_t hdr_len,
2145 const efx_dword_t *sdu, size_t sdu_len)
2146{
2147 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2148 u8 *pdu = nic_data->mcdi_buf.addr;
2149
2150 memcpy(pdu, hdr, hdr_len);
2151 memcpy(pdu + hdr_len, sdu, sdu_len);
2152 wmb();
2153
2154 /* The hardware provides 'low' and 'high' (doorbell) registers
2155 * for passing the 64-bit address of an MCDI request to
 2156	 * firmware. However, the dwords are swapped by firmware. The
2157 * least significant bits of the doorbell are then 0 for all
2158 * MCDI requests due to alignment.
2159 */
2160 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
2161 ER_DZ_MC_DB_LWRD);
2162 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
2163 ER_DZ_MC_DB_HWRD);
2164}
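/* Worked example (illustrative only): if nic_data->mcdi_buf.dma_addr were
 * 0x0000000812345678, the two writes above would put the high dword
 * 0x00000008 into ER_DZ_MC_DB_LWRD and the low dword 0x12345678 into
 * ER_DZ_MC_DB_HWRD, matching the dword swap the firmware expects; the
 * buffer's alignment keeps the low doorbell bits zero as described above.
 */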
2165
2166static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
2167{
2168 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2169 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
2170
2171 rmb();
2172 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
2173}
2174
2175static void
2176efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
2177 size_t offset, size_t outlen)
2178{
2179 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2180 const u8 *pdu = nic_data->mcdi_buf.addr;
2181
2182 memcpy(outbuf, pdu + offset, outlen);
2183}
2184
Daniel Pieczkoc577e592015-10-09 10:40:35 +01002185static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
2186{
2187 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2188
2189 /* All our allocations have been reset */
2190 efx_ef10_reset_mc_allocations(efx);
2191
2192 /* The datapath firmware might have been changed */
2193 nic_data->must_check_datapath_caps = true;
2194
2195 /* MAC statistics have been cleared on the NIC; clear the local
2196 * statistic that we update with efx_update_diff_stat().
2197 */
2198 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
2199}
2200
Ben Hutchings8127d662013-08-29 19:19:29 +01002201static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
2202{
2203 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2204 int rc;
2205
2206 rc = efx_ef10_get_warm_boot_count(efx);
2207 if (rc < 0) {
2208 /* The firmware is presumably in the process of
2209 * rebooting. However, we are supposed to report each
2210 * reboot just once, so we must only do that once we
2211 * can read and store the updated warm boot count.
2212 */
2213 return 0;
2214 }
2215
2216 if (rc == nic_data->warm_boot_count)
2217 return 0;
2218
2219 nic_data->warm_boot_count = rc;
Daniel Pieczkoc577e592015-10-09 10:40:35 +01002220 efx_ef10_mcdi_reboot_detected(efx);
Ben Hutchings869070c2013-09-05 22:46:10 +01002221
Ben Hutchings8127d662013-08-29 19:19:29 +01002222 return -EIO;
2223}
2224
2225/* Handle an MSI interrupt
2226 *
2227 * Handle an MSI hardware interrupt. This routine schedules event
2228 * queue processing. No interrupt acknowledgement cycle is necessary.
2229 * Also, we never need to check that the interrupt is for us, since
2230 * MSI interrupts cannot be shared.
2231 */
2232static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
2233{
2234 struct efx_msi_context *context = dev_id;
2235 struct efx_nic *efx = context->efx;
2236
2237 netif_vdbg(efx, intr, efx->net_dev,
2238 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
2239
Mark Rutland6aa7de02017-10-23 14:07:29 -07002240 if (likely(READ_ONCE(efx->irq_soft_enabled))) {
Ben Hutchings8127d662013-08-29 19:19:29 +01002241 /* Note test interrupts */
2242 if (context->index == efx->irq_level)
2243 efx->last_irq_cpu = raw_smp_processor_id();
2244
2245 /* Schedule processing of the channel */
2246 efx_schedule_channel_irq(efx->channel[context->index]);
2247 }
2248
2249 return IRQ_HANDLED;
2250}
2251
2252static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
2253{
2254 struct efx_nic *efx = dev_id;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002255 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
Ben Hutchings8127d662013-08-29 19:19:29 +01002256 struct efx_channel *channel;
2257 efx_dword_t reg;
2258 u32 queues;
2259
2260 /* Read the ISR which also ACKs the interrupts */
2261 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
2262 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
2263
2264 if (queues == 0)
2265 return IRQ_NONE;
2266
2267 if (likely(soft_enabled)) {
2268 /* Note test interrupts */
2269 if (queues & (1U << efx->irq_level))
2270 efx->last_irq_cpu = raw_smp_processor_id();
2271
2272 efx_for_each_channel(channel, efx) {
2273 if (queues & 1)
2274 efx_schedule_channel_irq(channel);
2275 queues >>= 1;
2276 }
2277 }
2278
2279 netif_vdbg(efx, intr, efx->net_dev,
2280 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
2281 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
2282
2283 return IRQ_HANDLED;
2284}
2285
Jon Cooper942e2982016-08-26 15:13:30 +01002286static int efx_ef10_irq_test_generate(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01002287{
2288 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
2289
Jon Cooper942e2982016-08-26 15:13:30 +01002290 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
2291 NULL) == 0)
2292 return -ENOTSUPP;
2293
Ben Hutchings8127d662013-08-29 19:19:29 +01002294 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
2295
2296 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
Jon Cooper942e2982016-08-26 15:13:30 +01002297 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
Ben Hutchings8127d662013-08-29 19:19:29 +01002298 inbuf, sizeof(inbuf), NULL, 0, NULL);
2299}
2300
2301static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
2302{
2303 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
2304 (tx_queue->ptr_mask + 1) *
2305 sizeof(efx_qword_t),
2306 GFP_KERNEL);
2307}
2308
2309/* This writes to the TX_DESC_WPTR and also pushes data */
2310static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
2311 const efx_qword_t *txd)
2312{
2313 unsigned int write_ptr;
2314 efx_oword_t reg;
2315
2316 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2317 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
2318 reg.qword[0] = *txd;
2319 efx_writeo_page(tx_queue->efx, &reg,
2320 ER_DZ_TX_DESC_UPD, tx_queue->queue);
2321}
2322
Bert Kenwarde9117e52016-11-17 10:51:54 +00002323/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
2324 */
2325static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
2326 struct sk_buff *skb,
2327 bool *data_mapped)
2328{
2329 struct efx_tx_buffer *buffer;
2330 struct tcphdr *tcp;
2331 struct iphdr *ip;
2332
2333 u16 ipv4_id;
2334 u32 seqnum;
2335 u32 mss;
2336
Edward Creee01b16a2016-12-02 15:51:33 +00002337 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
Bert Kenwarde9117e52016-11-17 10:51:54 +00002338
2339 mss = skb_shinfo(skb)->gso_size;
2340
2341 if (unlikely(mss < 4)) {
2342 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
2343 return -EINVAL;
2344 }
2345
2346 ip = ip_hdr(skb);
2347 if (ip->version == 4) {
2348 /* Modify IPv4 header if needed. */
2349 ip->tot_len = 0;
2350 ip->check = 0;
Edward Cree6d431312017-03-03 15:22:27 +00002351 ipv4_id = ntohs(ip->id);
Bert Kenwarde9117e52016-11-17 10:51:54 +00002352 } else {
2353 /* Modify IPv6 header if needed. */
2354 struct ipv6hdr *ipv6 = ipv6_hdr(skb);
2355
2356 ipv6->payload_len = 0;
2357 ipv4_id = 0;
2358 }
2359
2360 tcp = tcp_hdr(skb);
2361 seqnum = ntohl(tcp->seq);
2362
2363 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2364
2365 buffer->flags = EFX_TX_BUF_OPTION;
2366 buffer->len = 0;
2367 buffer->unmap_len = 0;
2368 EFX_POPULATE_QWORD_5(buffer->option,
2369 ESF_DZ_TX_DESC_IS_OPT, 1,
2370 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2371 ESF_DZ_TX_TSO_OPTION_TYPE,
2372 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2373 ESF_DZ_TX_TSO_IP_ID, ipv4_id,
2374 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
2375 );
2376 ++tx_queue->insert_count;
2377
2378 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2379
2380 buffer->flags = EFX_TX_BUF_OPTION;
2381 buffer->len = 0;
2382 buffer->unmap_len = 0;
2383 EFX_POPULATE_QWORD_4(buffer->option,
2384 ESF_DZ_TX_DESC_IS_OPT, 1,
2385 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2386 ESF_DZ_TX_TSO_OPTION_TYPE,
2387 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2388 ESF_DZ_TX_TSO_TCP_MSS, mss
2389 );
2390 ++tx_queue->insert_count;
2391
2392 return 0;
2393}
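/* Added note: the function above consumes two descriptor-ring slots per
 * TSO v2 packet - a FATSO2A option descriptor carrying the starting IPv4 ID
 * and TCP sequence number, then a FATSO2B option descriptor carrying the
 * MSS - before the usual data descriptors for the payload follow.
 */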
2394
Edward Cree46d1efd2016-11-17 10:52:36 +00002395static u32 efx_ef10_tso_versions(struct efx_nic *efx)
2396{
2397 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2398 u32 tso_versions = 0;
2399
2400 if (nic_data->datapath_caps &
2401 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
2402 tso_versions |= BIT(1);
2403 if (nic_data->datapath_caps2 &
2404 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
2405 tso_versions |= BIT(2);
2406 return tso_versions;
2407}
2408
Ben Hutchings8127d662013-08-29 19:19:29 +01002409static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
2410{
2411 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2412 EFX_BUF_SIZE));
Ben Hutchings8127d662013-08-29 19:19:29 +01002413 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
2414 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
2415 struct efx_channel *channel = tx_queue->channel;
2416 struct efx_nic *efx = tx_queue->efx;
Daniel Pieczko45b24492015-05-06 00:57:14 +01002417 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Bert Kenwarde9117e52016-11-17 10:51:54 +00002418 bool tso_v2 = false;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002419 size_t inlen;
Ben Hutchings8127d662013-08-29 19:19:29 +01002420 dma_addr_t dma_addr;
2421 efx_qword_t *txd;
2422 int rc;
2423 int i;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002424 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01002425
Martin Habets50663fe2018-01-25 17:25:33 +00002426	/* Only attempt to enable TX timestamping if we have the license for it;
2427 * otherwise TXQ init will fail
2428 */
2429 if (!(nic_data->licensed_features &
Martin Habets6aa47c82018-01-25 17:26:31 +00002430 (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
Martin Habets50663fe2018-01-25 17:25:33 +00002431 tx_queue->timestamping = false;
Martin Habets6aa47c82018-01-25 17:26:31 +00002432 /* Disable sync events on this channel. */
2433 if (efx->type->ptp_set_ts_sync_events)
2434 efx->type->ptp_set_ts_sync_events(efx, false, false);
2435 }
Martin Habets50663fe2018-01-25 17:25:33 +00002436
Bert Kenwarde9117e52016-11-17 10:51:54 +00002437 /* TSOv2 is a limited resource that can only be configured on a limited
2438 * number of queues. TSO without checksum offload is not really a thing,
2439 * so we only enable it for those queues.
Martin Habetsb9b603d42018-01-25 17:24:43 +00002440 * TSOv2 cannot be used with Hardware timestamping.
Bert Kenwarde9117e52016-11-17 10:51:54 +00002441 */
2442 if (csum_offload && (nic_data->datapath_caps2 &
Martin Habetsb9b603d42018-01-25 17:24:43 +00002443 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
2444 !tx_queue->timestamping) {
Bert Kenwarde9117e52016-11-17 10:51:54 +00002445 tso_v2 = true;
2446 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
2447 channel->channel);
2448 }
2449
Ben Hutchings8127d662013-08-29 19:19:29 +01002450 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
2451 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
2452 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
2453 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
Ben Hutchings8127d662013-08-29 19:19:29 +01002454 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
Daniel Pieczko45b24492015-05-06 00:57:14 +01002455 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01002456
2457 dma_addr = tx_queue->txd.buf.dma_addr;
2458
2459 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
2460 tx_queue->queue, entries, (u64)dma_addr);
2461
2462 for (i = 0; i < entries; ++i) {
2463 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
2464 dma_addr += EFX_BUF_SIZE;
2465 }
2466
2467 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
2468
Edward Creee638ee12016-11-17 10:52:07 +00002469 do {
Martin Habetsb9b603d42018-01-25 17:24:43 +00002470 MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
Edward Creee638ee12016-11-17 10:52:07 +00002471 /* This flag was removed from mcdi_pcol.h for
2472 * the non-_EXT version of INIT_TXQ. However,
2473 * firmware still honours it.
2474 */
2475 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
2476 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
Martin Habetsb9b603d42018-01-25 17:24:43 +00002477 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
2478 INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
2479 tx_queue->timestamping);
Edward Creee638ee12016-11-17 10:52:07 +00002480
2481 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
2482 NULL, 0, NULL);
2483 if (rc == -ENOSPC && tso_v2) {
2484 /* Retry without TSOv2 if we're short on contexts. */
2485 tso_v2 = false;
2486 netif_warn(efx, probe, efx->net_dev,
2487 "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n");
2488 } else if (rc) {
2489 efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
2490 MC_CMD_INIT_TXQ_EXT_IN_LEN,
2491 NULL, 0, rc);
2492 goto fail;
2493 }
2494 } while (rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01002495
2496 /* A previous user of this TX queue might have set us up the
2497 * bomb by writing a descriptor to the TX push collector but
2498 * not the doorbell. (Each collector belongs to a port, not a
2499 * queue or function, so cannot easily be reset.) We must
2500 * attempt to push a no-op descriptor in its place.
2501 */
2502 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2503 tx_queue->insert_count = 1;
2504 txd = efx_tx_desc(tx_queue, 0);
Martin Habetsb9b603d42018-01-25 17:24:43 +00002505 EFX_POPULATE_QWORD_5(*txd,
Ben Hutchings8127d662013-08-29 19:19:29 +01002506 ESF_DZ_TX_DESC_IS_OPT, true,
2507 ESF_DZ_TX_OPTION_TYPE,
2508 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2509 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
Martin Habetsb9b603d42018-01-25 17:24:43 +00002510 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
2511 ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
Ben Hutchings8127d662013-08-29 19:19:29 +01002512 tx_queue->write_count = 1;
Bert Kenward93171b12015-11-30 09:05:35 +00002513
Bert Kenwarde9117e52016-11-17 10:51:54 +00002514 if (tso_v2) {
2515 tx_queue->handle_tso = efx_ef10_tx_tso_desc;
2516 tx_queue->tso_version = 2;
2517 } else if (nic_data->datapath_caps &
2518 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
Bert Kenward93171b12015-11-30 09:05:35 +00002519 tx_queue->tso_version = 1;
2520 }
2521
Ben Hutchings8127d662013-08-29 19:19:29 +01002522 wmb();
2523 efx_ef10_push_tx_desc(tx_queue, txd);
2524
2525 return;
2526
2527fail:
Ben Hutchings48ce5632013-11-01 16:42:44 +00002528 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2529 tx_queue->queue);
Ben Hutchings8127d662013-08-29 19:19:29 +01002530}
2531
2532static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
2533{
2534 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002535 MCDI_DECLARE_BUF_ERR(outbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01002536 struct efx_nic *efx = tx_queue->efx;
2537 size_t outlen;
2538 int rc;
2539
2540 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
2541 tx_queue->queue);
2542
Edward Cree1e0b8122013-05-31 18:36:12 +01002543 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01002544 outbuf, sizeof(outbuf), &outlen);
2545
2546 if (rc && rc != -EALREADY)
2547 goto fail;
2548
2549 return;
2550
2551fail:
Edward Cree1e0b8122013-05-31 18:36:12 +01002552 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
2553 outbuf, outlen, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01002554}
2555
2556static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
2557{
2558 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
2559}
2560
2561/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
2562static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2563{
2564 unsigned int write_ptr;
2565 efx_dword_t reg;
2566
2567 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2568 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2569 efx_writed_page(tx_queue->efx, &reg,
2570 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2571}
2572
Bert Kenwarde9117e52016-11-17 10:51:54 +00002573#define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
2574
2575static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
2576 dma_addr_t dma_addr, unsigned int len)
2577{
2578 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
2579 /* If we need to break across multiple descriptors we should
2580 * stop at a page boundary. This assumes the length limit is
2581 * greater than the page size.
2582 */
2583 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
2584
2585 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
2586 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
2587 }
2588
2589 return len;
2590}
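/* Worked example (illustrative; assumes a 4KiB EFX_PAGE_SIZE): a fragment
 * mapped at dma_addr 0x...0800 with len 0x5000 exceeds the 0x3fff limit, so
 * end = 0x...47ff and the first descriptor is trimmed to
 * (0x...4000 - 0x...0800) = 0x3800 bytes, finishing exactly on a page
 * boundary; the remaining 0x1800 bytes go in the next descriptor(s).
 */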
2591
Ben Hutchings8127d662013-08-29 19:19:29 +01002592static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2593{
2594 unsigned int old_write_count = tx_queue->write_count;
2595 struct efx_tx_buffer *buffer;
2596 unsigned int write_ptr;
2597 efx_qword_t *txd;
2598
Martin Habetsb2663a42015-11-02 12:51:31 +00002599 tx_queue->xmit_more_available = false;
2600 if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2601 return;
Ben Hutchings8127d662013-08-29 19:19:29 +01002602
2603 do {
2604 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2605 buffer = &tx_queue->buffer[write_ptr];
2606 txd = efx_tx_desc(tx_queue, write_ptr);
2607 ++tx_queue->write_count;
2608
2609 /* Create TX descriptor ring entry */
2610 if (buffer->flags & EFX_TX_BUF_OPTION) {
2611 *txd = buffer->option;
Edward Creede1deff2017-01-13 21:20:14 +00002612 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
2613 /* PIO descriptor */
2614 tx_queue->packet_write_count = tx_queue->write_count;
Ben Hutchings8127d662013-08-29 19:19:29 +01002615 } else {
Edward Creede1deff2017-01-13 21:20:14 +00002616 tx_queue->packet_write_count = tx_queue->write_count;
Ben Hutchings8127d662013-08-29 19:19:29 +01002617 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2618 EFX_POPULATE_QWORD_3(
2619 *txd,
2620 ESF_DZ_TX_KER_CONT,
2621 buffer->flags & EFX_TX_BUF_CONT,
2622 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2623 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2624 }
2625 } while (tx_queue->write_count != tx_queue->insert_count);
2626
2627 wmb(); /* Ensure descriptors are written before they are fetched */
2628
2629 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2630 txd = efx_tx_desc(tx_queue,
2631 old_write_count & tx_queue->ptr_mask);
2632 efx_ef10_push_tx_desc(tx_queue, txd);
2633 ++tx_queue->pushes;
2634 } else {
2635 efx_ef10_notify_tx_desc(tx_queue);
2636 }
2637}
2638
Edward Creea33a4c72016-11-03 22:12:27 +00002639#define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
2640 1 << RSS_MODE_HASH_DST_ADDR_LBN)
2641#define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
2642 1 << RSS_MODE_HASH_DST_PORT_LBN)
2643#define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
2644 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
2645 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
2646 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
2647 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
2648 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
2649 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
2650 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
2651 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
2652 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
2653
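/* Added note: these defaults give 4-tuple (addresses + ports) hashing for
 * TCP over IPv4/IPv6 but only 2-tuple (addresses) hashing for UDP and other
 * IP traffic, which is why efx_ef10_set_rss_flags() below only has to OR in
 * the UDP port bits to get 4-tuple UDP hashing.
 */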
2654static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags)
2655{
2656 /* Firmware had a bug (sfc bug 61952) where it would not actually
2657 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
2658 * This meant that it would always contain whatever was previously
2659 * in the MCDI buffer. Fortunately, all firmware versions with
2660 * this bug have the same default flags value for a newly-allocated
2661 * RSS context, and the only time we want to get the flags is just
2662 * after allocating. Moreover, the response has a 32-bit hole
2663 * where the context ID would be in the request, so we can use an
2664 * overlength buffer in the request and pre-fill the flags field
2665 * with what we believe the default to be. Thus if the firmware
2666 * has the bug, it will leave our pre-filled value in the flags
2667 * field of the response, and we will get the right answer.
2668 *
2669 * However, this does mean that this function should NOT be used if
2670 * the RSS context flags might not be their defaults - it is ONLY
2671 * reliably correct for a newly-allocated RSS context.
2672 */
2673 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2674 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
2675 size_t outlen;
2676 int rc;
2677
2678 /* Check we have a hole for the context ID */
2679 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
2680 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
2681 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
2682 RSS_CONTEXT_FLAGS_DEFAULT);
2683 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
2684 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
2685 if (rc == 0) {
2686 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
2687 rc = -EIO;
2688 else
2689 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
2690 }
2691 return rc;
2692}
2693
2694/* Attempt to enable 4-tuple UDP hashing on the specified RSS context.
2695 * If we fail, we just leave the RSS context at its default hash settings,
2696 * which is safe but may slightly reduce performance.
2697 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
2698 * just need to set the UDP ports flags (for both IP versions).
2699 */
Edward Cree42356d92018-03-08 15:45:17 +00002700static void efx_ef10_set_rss_flags(struct efx_nic *efx,
2701 struct efx_rss_context *ctx)
Edward Creea33a4c72016-11-03 22:12:27 +00002702{
2703 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
2704 u32 flags;
2705
2706 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
2707
Edward Cree42356d92018-03-08 15:45:17 +00002708 if (efx_ef10_get_rss_flags(efx, ctx->context_id, &flags) != 0)
Edward Creea33a4c72016-11-03 22:12:27 +00002709 return;
Edward Cree42356d92018-03-08 15:45:17 +00002710 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
2711 ctx->context_id);
Edward Creea33a4c72016-11-03 22:12:27 +00002712 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
2713 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
2714 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
Edward Creeb718c882016-11-03 22:12:58 +00002715 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
2716 NULL, 0, NULL))
2717 /* Succeeded, so UDP 4-tuple is now enabled */
Edward Cree42356d92018-03-08 15:45:17 +00002718 ctx->rx_hash_udp_4tuple = true;
Edward Creea33a4c72016-11-03 22:12:27 +00002719}
2720
Edward Cree42356d92018-03-08 15:45:17 +00002721static int efx_ef10_alloc_rss_context(struct efx_nic *efx, bool exclusive,
2722 struct efx_rss_context *ctx,
2723 unsigned *context_size)
Ben Hutchings8127d662013-08-29 19:19:29 +01002724{
2725 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
2726 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
Daniel Pieczko45b24492015-05-06 00:57:14 +01002727 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01002728 size_t outlen;
2729 int rc;
Jon Cooper267c0152015-05-06 00:59:38 +01002730 u32 alloc_type = exclusive ?
2731 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
2732 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
2733 unsigned rss_spread = exclusive ?
2734 efx->rss_spread :
2735 min(rounddown_pow_of_two(efx->rss_spread),
2736 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
2737
2738 if (!exclusive && rss_spread == 1) {
Edward Cree42356d92018-03-08 15:45:17 +00002739 ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
Jon Cooper267c0152015-05-06 00:59:38 +01002740 if (context_size)
2741 *context_size = 1;
2742 return 0;
2743 }
Ben Hutchings8127d662013-08-29 19:19:29 +01002744
Jon Cooperdcb41232016-04-25 16:51:00 +01002745 if (nic_data->datapath_caps &
2746 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
2747 return -EOPNOTSUPP;
2748
Ben Hutchings8127d662013-08-29 19:19:29 +01002749 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
Daniel Pieczko45b24492015-05-06 00:57:14 +01002750 nic_data->vport_id);
Jon Cooper267c0152015-05-06 00:59:38 +01002751 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
2752 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
Ben Hutchings8127d662013-08-29 19:19:29 +01002753
2754 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
2755 outbuf, sizeof(outbuf), &outlen);
2756 if (rc != 0)
2757 return rc;
2758
2759 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
2760 return -EIO;
2761
Edward Cree42356d92018-03-08 15:45:17 +00002762 ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
Ben Hutchings8127d662013-08-29 19:19:29 +01002763
Jon Cooper267c0152015-05-06 00:59:38 +01002764 if (context_size)
2765 *context_size = rss_spread;
2766
Edward Creea33a4c72016-11-03 22:12:27 +00002767 if (nic_data->datapath_caps &
2768 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
Edward Cree42356d92018-03-08 15:45:17 +00002769 efx_ef10_set_rss_flags(efx, ctx);
Edward Creea33a4c72016-11-03 22:12:27 +00002770
Ben Hutchings8127d662013-08-29 19:19:29 +01002771 return 0;
2772}
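/* Added note: an exclusive context is sized to efx->rss_spread, while a
 * shared context uses efx->rss_spread rounded down to a power of two and
 * capped at EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE; a shared request with an
 * rss_spread of 1 allocates no context at all, since no spreading is needed.
 */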
2773
Edward Cree42356d92018-03-08 15:45:17 +00002774static int efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
Ben Hutchings8127d662013-08-29 19:19:29 +01002775{
2776 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01002777
2778 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
2779 context);
Edward Cree42356d92018-03-08 15:45:17 +00002780 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01002781 NULL, 0, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01002782}
2783
Jon Cooper267c0152015-05-06 00:59:38 +01002784static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
Edward Creef74d1992017-01-17 12:01:53 +00002785 const u32 *rx_indir_table, const u8 *key)
Ben Hutchings8127d662013-08-29 19:19:29 +01002786{
2787 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
2788 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
2789 int i, rc;
2790
2791 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2792 context);
Edward Cree42356d92018-03-08 15:45:17 +00002793 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
Ben Hutchings8127d662013-08-29 19:19:29 +01002794 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2795
Edward Cree42356d92018-03-08 15:45:17 +00002796 /* This iterates over the length of efx->rss_context.rx_indir_table, but
2797 * copies bytes from rx_indir_table. That's because the latter is a
2798 * pointer rather than an array, but should have the same length.
2799 * The efx->rss_context.rx_hash_key loop below is similar.
Edward Creef74d1992017-01-17 12:01:53 +00002800 */
Edward Cree42356d92018-03-08 15:45:17 +00002801 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
Ben Hutchings8127d662013-08-29 19:19:29 +01002802 MCDI_PTR(tablebuf,
2803 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
Jon Cooper267c0152015-05-06 00:59:38 +01002804 (u8) rx_indir_table[i];
Ben Hutchings8127d662013-08-29 19:19:29 +01002805
2806 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2807 sizeof(tablebuf), NULL, 0, NULL);
2808 if (rc != 0)
2809 return rc;
2810
2811 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2812 context);
Edward Cree42356d92018-03-08 15:45:17 +00002813 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
Ben Hutchings8127d662013-08-29 19:19:29 +01002814 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
Edward Cree42356d92018-03-08 15:45:17 +00002815 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
Edward Creef74d1992017-01-17 12:01:53 +00002816 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
Ben Hutchings8127d662013-08-29 19:19:29 +01002817
2818 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2819 sizeof(keybuf), NULL, 0, NULL);
2820}
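/* Illustrative example (not part of the driver): feeding a default
 * indirection table to efx_ef10_populate_rss_table() above.  Note the helper
 * truncates each 32-bit entry to a single byte, so queue numbers must fit in
 * a u8.  The wrapper name and the on-stack table are hypothetical (a real
 * caller would avoid a 512-byte stack array); ARRAY_SIZE,
 * ethtool_rxfh_indir_default() and the context/key fields are the same ones
 * used elsewhere in this file.
 */
static int efx_example_push_default_table(struct efx_nic *efx,
					  struct efx_rss_context *ctx)
{
	u32 table[ARRAY_SIZE(efx->rss_context.rx_indir_table)];
	size_t i;

	for (i = 0; i < ARRAY_SIZE(table); i++)
		table[i] = ethtool_rxfh_indir_default(i, efx->rss_spread);

	return efx_ef10_populate_rss_table(efx, ctx->context_id, table,
					   ctx->rx_hash_key);
}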
2821
2822static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2823{
Edward Cree42356d92018-03-08 15:45:17 +00002824 int rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01002825
Edward Cree42356d92018-03-08 15:45:17 +00002826 if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID) {
2827 rc = efx_ef10_free_rss_context(efx, efx->rss_context.context_id);
2828 WARN_ON(rc != 0);
2829 }
2830 efx->rss_context.context_id = EFX_EF10_RSS_CONTEXT_INVALID;
Ben Hutchings8127d662013-08-29 19:19:29 +01002831}
2832
Jon Cooper267c0152015-05-06 00:59:38 +01002833static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2834 unsigned *context_size)
2835{
Jon Cooper267c0152015-05-06 00:59:38 +01002836 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Edward Cree42356d92018-03-08 15:45:17 +00002837 int rc = efx_ef10_alloc_rss_context(efx, false, &efx->rss_context,
2838 context_size);
Jon Cooper267c0152015-05-06 00:59:38 +01002839
2840 if (rc != 0)
2841 return rc;
2842
Jon Cooper267c0152015-05-06 00:59:38 +01002843 nic_data->rx_rss_context_exclusive = false;
Edward Cree42356d92018-03-08 15:45:17 +00002844 efx_set_default_rx_indir_table(efx, &efx->rss_context);
Jon Cooper267c0152015-05-06 00:59:38 +01002845 return 0;
2846}
2847
2848static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
Edward Creef74d1992017-01-17 12:01:53 +00002849 const u32 *rx_indir_table,
2850 const u8 *key)
Ben Hutchings8127d662013-08-29 19:19:29 +01002851{
Edward Cree42356d92018-03-08 15:45:17 +00002852 u32 old_rx_rss_context = efx->rss_context.context_id;
Ben Hutchings8127d662013-08-29 19:19:29 +01002853 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2854 int rc;
2855
Edward Cree42356d92018-03-08 15:45:17 +00002856 if (efx->rss_context.context_id == EFX_EF10_RSS_CONTEXT_INVALID ||
Jon Cooper267c0152015-05-06 00:59:38 +01002857 !nic_data->rx_rss_context_exclusive) {
Edward Cree42356d92018-03-08 15:45:17 +00002858 rc = efx_ef10_alloc_rss_context(efx, true, &efx->rss_context,
2859 NULL);
Jon Cooper267c0152015-05-06 00:59:38 +01002860 if (rc == -EOPNOTSUPP)
2861 return rc;
2862 else if (rc != 0)
2863 goto fail1;
Ben Hutchings8127d662013-08-29 19:19:29 +01002864 }
2865
Edward Cree42356d92018-03-08 15:45:17 +00002866 rc = efx_ef10_populate_rss_table(efx, efx->rss_context.context_id,
Edward Creef74d1992017-01-17 12:01:53 +00002867 rx_indir_table, key);
Ben Hutchings8127d662013-08-29 19:19:29 +01002868 if (rc != 0)
Jon Cooper267c0152015-05-06 00:59:38 +01002869 goto fail2;
Ben Hutchings8127d662013-08-29 19:19:29 +01002870
Edward Cree42356d92018-03-08 15:45:17 +00002871 if (efx->rss_context.context_id != old_rx_rss_context &&
2872 old_rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2873 WARN_ON(efx_ef10_free_rss_context(efx, old_rx_rss_context) != 0);
Jon Cooper267c0152015-05-06 00:59:38 +01002874 nic_data->rx_rss_context_exclusive = true;
Edward Cree42356d92018-03-08 15:45:17 +00002875 if (rx_indir_table != efx->rss_context.rx_indir_table)
2876 memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
2877 sizeof(efx->rss_context.rx_indir_table));
2878 if (key != efx->rss_context.rx_hash_key)
2879 memcpy(efx->rss_context.rx_hash_key, key,
2880 efx->type->rx_hash_key_size);
Edward Creef74d1992017-01-17 12:01:53 +00002881
Jon Cooper267c0152015-05-06 00:59:38 +01002882 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01002883
Jon Cooper267c0152015-05-06 00:59:38 +01002884fail2:
Edward Cree42356d92018-03-08 15:45:17 +00002885 if (old_rx_rss_context != efx->rss_context.context_id) {
2886 WARN_ON(efx_ef10_free_rss_context(efx, efx->rss_context.context_id) != 0);
2887 efx->rss_context.context_id = old_rx_rss_context;
2888 }
Jon Cooper267c0152015-05-06 00:59:38 +01002889fail1:
Ben Hutchings8127d662013-08-29 19:19:29 +01002890 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Jon Cooper267c0152015-05-06 00:59:38 +01002891 return rc;
2892}
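/* Note: in the function above, when a new exclusive context replaces a
 * previously pushed one, the new context is allocated and its table and key
 * are programmed before the old context is freed.  On failure the old handle
 * is restored to (or retained in) efx->rss_context.context_id rather than
 * being lost, which is what the fail1/fail2 paths implement.
 */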
2893
Edward Cree42356d92018-03-08 15:45:17 +00002894static int efx_ef10_rx_push_rss_context_config(struct efx_nic *efx,
2895 struct efx_rss_context *ctx,
2896 const u32 *rx_indir_table,
2897 const u8 *key)
Edward Creea707d182017-01-17 12:02:12 +00002898{
Edward Cree42356d92018-03-08 15:45:17 +00002899 int rc;
2900
Edward Creee0a65e32018-03-27 17:44:36 +01002901 WARN_ON(!mutex_is_locked(&efx->rss_lock));
2902
Edward Cree42356d92018-03-08 15:45:17 +00002903 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
2904 rc = efx_ef10_alloc_rss_context(efx, true, ctx, NULL);
2905 if (rc)
2906 return rc;
2907 }
2908
2909 if (!rx_indir_table) /* Delete this context */
2910 return efx_ef10_free_rss_context(efx, ctx->context_id);
2911
2912 rc = efx_ef10_populate_rss_table(efx, ctx->context_id,
2913 rx_indir_table, key);
2914 if (rc)
2915 return rc;
2916
2917 memcpy(ctx->rx_indir_table, rx_indir_table,
2918 sizeof(efx->rss_context.rx_indir_table));
2919 memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
2920
2921 return 0;
2922}
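/* Illustrative example (not part of the driver):
 * efx_ef10_rx_push_rss_context_config() above asserts that efx->rss_lock is
 * held, so a hypothetical caller would wrap it as below, mirroring
 * efx_ef10_rx_pull_rss_config() further down.  Only the wrapper name is
 * invented.
 */
static int efx_example_push_ctx_locked(struct efx_nic *efx,
				       struct efx_rss_context *ctx,
				       const u32 *rx_indir_table,
				       const u8 *key)
{
	int rc;

	mutex_lock(&efx->rss_lock);
	rc = efx_ef10_rx_push_rss_context_config(efx, ctx, rx_indir_table,
						 key);
	mutex_unlock(&efx->rss_lock);
	return rc;
}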
2923
2924static int efx_ef10_rx_pull_rss_context_config(struct efx_nic *efx,
2925 struct efx_rss_context *ctx)
2926{
Edward Creea707d182017-01-17 12:02:12 +00002927 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
2928 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
2929 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
2930 size_t outlen;
2931 int rc, i;
2932
Edward Creee0a65e32018-03-27 17:44:36 +01002933 WARN_ON(!mutex_is_locked(&efx->rss_lock));
2934
Edward Creea707d182017-01-17 12:02:12 +00002935 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
2936 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
2937
Edward Cree42356d92018-03-08 15:45:17 +00002938 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID)
Edward Creea707d182017-01-17 12:02:12 +00002939 return -ENOENT;
2940
2941 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
Edward Cree42356d92018-03-08 15:45:17 +00002942 ctx->context_id);
2943 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
Edward Creea707d182017-01-17 12:02:12 +00002944 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
2945 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
2946 tablebuf, sizeof(tablebuf), &outlen);
2947 if (rc != 0)
2948 return rc;
2949
2950 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
2951 return -EIO;
2952
Edward Cree42356d92018-03-08 15:45:17 +00002953 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
2954 ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
Edward Creea707d182017-01-17 12:02:12 +00002955 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
2956
2957 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
Edward Cree42356d92018-03-08 15:45:17 +00002958 ctx->context_id);
2959 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
Edward Creea707d182017-01-17 12:02:12 +00002960 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2961 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
2962 keybuf, sizeof(keybuf), &outlen);
2963 if (rc != 0)
2964 return rc;
2965
2966 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
2967 return -EIO;
2968
Edward Cree42356d92018-03-08 15:45:17 +00002969 for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
2970 ctx->rx_hash_key[i] = MCDI_PTR(
Edward Creea707d182017-01-17 12:02:12 +00002971 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
2972
2973 return 0;
2974}
2975
Edward Cree42356d92018-03-08 15:45:17 +00002976static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
2977{
Edward Creee0a65e32018-03-27 17:44:36 +01002978 int rc;
2979
2980 mutex_lock(&efx->rss_lock);
2981 rc = efx_ef10_rx_pull_rss_context_config(efx, &efx->rss_context);
2982 mutex_unlock(&efx->rss_lock);
2983 return rc;
Edward Cree42356d92018-03-08 15:45:17 +00002984}
2985
2986static void efx_ef10_rx_restore_rss_contexts(struct efx_nic *efx)
2987{
Edward Creee0a65e32018-03-27 17:44:36 +01002988 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Edward Cree42356d92018-03-08 15:45:17 +00002989 struct efx_rss_context *ctx;
2990 int rc;
2991
Edward Creee0a65e32018-03-27 17:44:36 +01002992 WARN_ON(!mutex_is_locked(&efx->rss_lock));
2993
2994 if (!nic_data->must_restore_rss_contexts)
2995 return;
2996
Edward Cree42356d92018-03-08 15:45:17 +00002997 list_for_each_entry(ctx, &efx->rss_context.list, list) {
2998 /* previous NIC RSS context is gone */
2999 ctx->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3000 /* so try to allocate a new one */
3001 rc = efx_ef10_rx_push_rss_context_config(efx, ctx,
3002 ctx->rx_indir_table,
3003 ctx->rx_hash_key);
3004 if (rc)
3005 netif_warn(efx, probe, efx->net_dev,
3006 "failed to restore RSS context %u, rc=%d"
3007 "; RSS filters may fail to be applied\n",
3008 ctx->user_id, rc);
3009 }
Edward Creee0a65e32018-03-27 17:44:36 +01003010 nic_data->must_restore_rss_contexts = false;
Edward Cree42356d92018-03-08 15:45:17 +00003011}
3012
Jon Cooper267c0152015-05-06 00:59:38 +01003013static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
Edward Creef74d1992017-01-17 12:01:53 +00003014 const u32 *rx_indir_table,
3015 const u8 *key)
Jon Cooper267c0152015-05-06 00:59:38 +01003016{
3017 int rc;
3018
3019 if (efx->rss_spread == 1)
3020 return 0;
3021
Edward Creef74d1992017-01-17 12:01:53 +00003022 if (!key)
Edward Cree42356d92018-03-08 15:45:17 +00003023 key = efx->rss_context.rx_hash_key;
Edward Creef74d1992017-01-17 12:01:53 +00003024
3025 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
Jon Cooper267c0152015-05-06 00:59:38 +01003026
3027 if (rc == -ENOBUFS && !user) {
3028 unsigned context_size;
3029 bool mismatch = false;
3030 size_t i;
3031
Edward Cree42356d92018-03-08 15:45:17 +00003032 for (i = 0;
3033 i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
Jon Cooper267c0152015-05-06 00:59:38 +01003034 i++)
3035 mismatch = rx_indir_table[i] !=
3036 ethtool_rxfh_indir_default(i, efx->rss_spread);
3037
3038 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
3039 if (rc == 0) {
3040 if (context_size != efx->rss_spread)
3041 netif_warn(efx, probe, efx->net_dev,
3042 "Could not allocate an exclusive RSS"
3043 " context; allocated a shared one of"
3044 " different size."
3045 " Wanted %u, got %u.\n",
3046 efx->rss_spread, context_size);
3047 else if (mismatch)
3048 netif_warn(efx, probe, efx->net_dev,
3049 "Could not allocate an exclusive RSS"
3050 " context; allocated a shared one but"
3051 " could not apply custom"
3052 " indirection.\n");
3053 else
3054 netif_info(efx, probe, efx->net_dev,
3055 "Could not allocate an exclusive RSS"
3056 " context; allocated a shared one.\n");
3057 }
3058 }
3059 return rc;
3060}
3061
3062static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
3063 const u32 *rx_indir_table
Edward Creef74d1992017-01-17 12:01:53 +00003064 __attribute__ ((unused)),
3065 const u8 *key
Jon Cooper267c0152015-05-06 00:59:38 +01003066 __attribute__ ((unused)))
3067{
Jon Cooper267c0152015-05-06 00:59:38 +01003068 if (user)
3069 return -EOPNOTSUPP;
Edward Cree42356d92018-03-08 15:45:17 +00003070 if (efx->rss_context.context_id != EFX_EF10_RSS_CONTEXT_INVALID)
Jon Cooper267c0152015-05-06 00:59:38 +01003071 return 0;
3072 return efx_ef10_rx_push_shared_rss_config(efx, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01003073}
3074
3075static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
3076{
3077 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
3078 (rx_queue->ptr_mask + 1) *
3079 sizeof(efx_qword_t),
3080 GFP_KERNEL);
3081}
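/* Note: the descriptor ring allocated above holds (ptr_mask + 1) eight-byte
 * descriptors.  For example, a 4096-entry ring needs 4096 * 8 = 32768 bytes
 * of DMA-coherent memory, i.e. eight EFX_BUF_SIZE chunks (assuming the usual
 * 4 KiB EFX_BUF_SIZE), which is why efx_ef10_rx_init() below programs one
 * INIT_RXQ DMA address per EFX_BUF_SIZE of the buffer.
 */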
3082
3083static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
3084{
3085 MCDI_DECLARE_BUF(inbuf,
3086 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
3087 EFX_BUF_SIZE));
Ben Hutchings8127d662013-08-29 19:19:29 +01003088 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3089 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
3090 struct efx_nic *efx = rx_queue->efx;
Daniel Pieczko45b24492015-05-06 00:57:14 +01003091 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003092 size_t inlen;
Ben Hutchings8127d662013-08-29 19:19:29 +01003093 dma_addr_t dma_addr;
3094 int rc;
3095 int i;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003096 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01003097
3098 rx_queue->scatter_n = 0;
3099 rx_queue->scatter_len = 0;
3100
3101 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
3102 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
3103 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
3104 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
3105 efx_rx_queue_index(rx_queue));
Jon Cooperbd9a2652013-11-18 12:54:41 +00003106 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
3107 INIT_RXQ_IN_FLAG_PREFIX, 1,
3108 INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
Ben Hutchings8127d662013-08-29 19:19:29 +01003109 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
Daniel Pieczko45b24492015-05-06 00:57:14 +01003110 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01003111
3112 dma_addr = rx_queue->rxd.buf.dma_addr;
3113
3114 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
3115 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
3116
3117 for (i = 0; i < entries; ++i) {
3118 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
3119 dma_addr += EFX_BUF_SIZE;
3120 }
3121
3122 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
3123
3124 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003125 NULL, 0, NULL);
Ben Hutchings48ce5632013-11-01 16:42:44 +00003126 if (rc)
3127 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
3128 efx_rx_queue_index(rx_queue));
Ben Hutchings8127d662013-08-29 19:19:29 +01003129}
3130
3131static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
3132{
3133 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
Jon Cooperaa09a3d2015-05-20 11:10:41 +01003134 MCDI_DECLARE_BUF_ERR(outbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01003135 struct efx_nic *efx = rx_queue->efx;
3136 size_t outlen;
3137 int rc;
3138
3139 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
3140 efx_rx_queue_index(rx_queue));
3141
Edward Cree1e0b8122013-05-31 18:36:12 +01003142 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01003143 outbuf, sizeof(outbuf), &outlen);
3144
3145 if (rc && rc != -EALREADY)
3146 goto fail;
3147
3148 return;
3149
3150fail:
Edward Cree1e0b8122013-05-31 18:36:12 +01003151 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
3152 outbuf, outlen, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01003153}
3154
3155static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
3156{
3157 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
3158}
3159
3160/* This creates an entry in the RX descriptor queue */
3161static inline void
3162efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
3163{
3164 struct efx_rx_buffer *rx_buf;
3165 efx_qword_t *rxd;
3166
3167 rxd = efx_rx_desc(rx_queue, index);
3168 rx_buf = efx_rx_buffer(rx_queue, index);
3169 EFX_POPULATE_QWORD_2(*rxd,
3170 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
3171 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
3172}
3173
3174static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
3175{
3176 struct efx_nic *efx = rx_queue->efx;
3177 unsigned int write_count;
3178 efx_dword_t reg;
3179
3180 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
3181 write_count = rx_queue->added_count & ~7;
3182 if (rx_queue->notified_count == write_count)
3183 return;
3184
3185 do
3186 efx_ef10_build_rx_desc(
3187 rx_queue,
3188 rx_queue->notified_count & rx_queue->ptr_mask);
3189 while (++rx_queue->notified_count != write_count);
3190
3191 wmb();
3192 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
3193 write_count & rx_queue->ptr_mask);
3194 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
3195 efx_rx_queue_index(rx_queue));
3196}
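/* Note: a worked example of the multiple-of-8 rule above.  If added_count is
 * 21 and notified_count is 16, then write_count = 21 & ~7 = 16, which equals
 * notified_count, so no doorbell is rung; the five descriptors already added
 * (ring positions 16-20) are only pushed to hardware once added_count
 * reaches 24.
 */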
3197
3198static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
3199
3200static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
3201{
3202 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
3203 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3204 efx_qword_t event;
3205
3206 EFX_POPULATE_QWORD_2(event,
3207 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3208 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
3209
3210 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3211
3212 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3213 * already swapped the data to little-endian order.
3214 */
3215 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3216 sizeof(efx_qword_t));
3217
3218 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
3219 inbuf, sizeof(inbuf), 0,
3220 efx_ef10_rx_defer_refill_complete, 0);
3221}
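/* Note: the function above does not refill the queue directly.  It asks the
 * MC to loop a driver-generated event (EFX_EF10_REFILL) back onto this
 * channel's event queue, so the actual refill in
 * efx_ef10_handle_driver_generated_event() below runs in the channel's
 * normal event-processing context rather than in the caller's context.
 */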
3222
3223static void
3224efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
3225 int rc, efx_dword_t *outbuf,
3226 size_t outlen_actual)
3227{
3228 /* nothing to do */
3229}
3230
3231static int efx_ef10_ev_probe(struct efx_channel *channel)
3232{
3233 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
3234 (channel->eventq_mask + 1) *
3235 sizeof(efx_qword_t),
3236 GFP_KERNEL);
3237}
3238
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003239static void efx_ef10_ev_fini(struct efx_channel *channel)
3240{
3241 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
3242 MCDI_DECLARE_BUF_ERR(outbuf);
3243 struct efx_nic *efx = channel->efx;
3244 size_t outlen;
3245 int rc;
3246
3247 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
3248
3249 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
3250 outbuf, sizeof(outbuf), &outlen);
3251
3252 if (rc && rc != -EALREADY)
3253 goto fail;
3254
3255 return;
3256
3257fail:
3258 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
3259 outbuf, outlen, rc);
3260}
3261
Ben Hutchings8127d662013-08-29 19:19:29 +01003262static int efx_ef10_ev_init(struct efx_channel *channel)
3263{
3264 MCDI_DECLARE_BUF(inbuf,
Bert Kenwarda9955602016-08-11 13:01:54 +01003265 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
3266 EFX_BUF_SIZE));
3267 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01003268 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
3269 struct efx_nic *efx = channel->efx;
3270 struct efx_ef10_nic_data *nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01003271 size_t inlen, outlen;
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003272 unsigned int enabled, implemented;
Ben Hutchings8127d662013-08-29 19:19:29 +01003273 dma_addr_t dma_addr;
3274 int rc;
3275 int i;
3276
3277 nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01003278
3279 /* Fill event queue with all ones (i.e. empty events) */
3280 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
3281
3282 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
3283 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
3284 /* INIT_EVQ expects index in vector table, not absolute */
3285 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
Ben Hutchings8127d662013-08-29 19:19:29 +01003286 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
3287 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
3288 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
3289 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
3290 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
3291 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
3292 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
3293
Bert Kenwarda9955602016-08-11 13:01:54 +01003294 if (nic_data->datapath_caps2 &
3295 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) {
3296 /* Use the new generic approach to specifying event queue
3297 * configuration, requesting lower latency or higher throughput.
3298 * The options that actually get used appear in the output.
3299 */
3300 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
3301 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
3302 INIT_EVQ_V2_IN_FLAG_TYPE,
3303 MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
3304 } else {
3305 bool cut_thru = !(nic_data->datapath_caps &
3306 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
3307
3308 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
3309 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
3310 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
3311 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
3312 INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru);
3313 }
3314
Ben Hutchings8127d662013-08-29 19:19:29 +01003315 dma_addr = channel->eventq.buf.dma_addr;
3316 for (i = 0; i < entries; ++i) {
3317 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
3318 dma_addr += EFX_BUF_SIZE;
3319 }
3320
3321 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
3322
3323 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
3324 outbuf, sizeof(outbuf), &outlen);
Bert Kenwarda9955602016-08-11 13:01:54 +01003325
3326 if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
3327 netif_dbg(efx, drv, efx->net_dev,
3328 "Channel %d using event queue flags %08x\n",
3329 channel->channel,
3330 MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));
3331
Ben Hutchings8127d662013-08-29 19:19:29 +01003332 /* IRQ return is ignored */
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003333 if (channel->channel || rc)
3334 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01003335
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003336 /* Successfully created event queue on channel 0 */
3337 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
Edward Cree832dc9e2015-07-21 15:09:31 +01003338 if (rc == -ENOSYS) {
Bert Kenwardd95e3292016-08-11 13:02:36 +01003339 /* GET_WORKAROUNDS predates this workaround, so a firmware that
 3340 * does not implement GET_WORKAROUNDS cannot support the workaround either.
Edward Cree832dc9e2015-07-21 15:09:31 +01003341 */
3342 nic_data->workaround_26807 = false;
3343 rc = 0;
3344 } else if (rc) {
Ben Hutchings8127d662013-08-29 19:19:29 +01003345 goto fail;
Edward Cree832dc9e2015-07-21 15:09:31 +01003346 } else {
3347 nic_data->workaround_26807 =
3348 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
Ben Hutchings8127d662013-08-29 19:19:29 +01003349
Edward Cree832dc9e2015-07-21 15:09:31 +01003350 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
3351 !nic_data->workaround_26807) {
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003352 unsigned int flags;
3353
Daniel Pieczko34ccfe62015-07-21 15:09:43 +01003354 rc = efx_mcdi_set_workaround(efx,
3355 MC_CMD_WORKAROUND_BUG26807,
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003356 true, &flags);
3357
3358 if (!rc) {
3359 if (flags &
3360 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
3361 netif_info(efx, drv, efx->net_dev,
3362 "other functions on NIC have been reset\n");
Daniel Pieczkoabd86a52015-12-04 08:48:39 +00003363
3364 /* With MCFW v4.6.x and earlier, the
3365 * boot count will have incremented,
3366 * so re-read the warm_boot_count
3367 * value now to ensure this function
3368 * doesn't think it has changed next
3369 * time it checks.
3370 */
3371 rc = efx_ef10_get_warm_boot_count(efx);
3372 if (rc >= 0) {
3373 nic_data->warm_boot_count = rc;
3374 rc = 0;
3375 }
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003376 }
Edward Cree832dc9e2015-07-21 15:09:31 +01003377 nic_data->workaround_26807 = true;
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003378 } else if (rc == -EPERM) {
Edward Cree832dc9e2015-07-21 15:09:31 +01003379 rc = 0;
Daniel Pieczko5a55a722015-07-21 15:10:02 +01003380 }
Edward Cree832dc9e2015-07-21 15:09:31 +01003381 }
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003382 }
3383
3384 if (!rc)
3385 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01003386
3387fail:
Daniel Pieczko46e612b2015-07-21 15:09:18 +01003388 efx_ef10_ev_fini(channel);
3389 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01003390}
3391
3392static void efx_ef10_ev_remove(struct efx_channel *channel)
3393{
3394 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
3395}
3396
3397static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
3398 unsigned int rx_queue_label)
3399{
3400 struct efx_nic *efx = rx_queue->efx;
3401
3402 netif_info(efx, hw, efx->net_dev,
3403 "rx event arrived on queue %d labeled as queue %u\n",
3404 efx_rx_queue_index(rx_queue), rx_queue_label);
3405
3406 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3407}
3408
3409static void
3410efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
3411 unsigned int actual, unsigned int expected)
3412{
3413 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
3414 struct efx_nic *efx = rx_queue->efx;
3415
3416 netif_info(efx, hw, efx->net_dev,
3417 "dropped %d events (index=%d expected=%d)\n",
3418 dropped, actual, expected);
3419
3420 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
3421}
3422
3423/* A partially received RX packet was aborted; clean up the queue state. */
3424static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
3425{
3426 unsigned int rx_desc_ptr;
3427
Ben Hutchings8127d662013-08-29 19:19:29 +01003428 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
3429 "scattered RX aborted (dropping %u buffers)\n",
3430 rx_queue->scatter_n);
3431
3432 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
3433
3434 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
3435 0, EFX_RX_PKT_DISCARD);
3436
3437 rx_queue->removed_count += rx_queue->scatter_n;
3438 rx_queue->scatter_n = 0;
3439 rx_queue->scatter_len = 0;
3440 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
3441}
3442
Jon Coopera0ee3542017-02-08 16:50:40 +00003443static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
3444 unsigned int n_packets,
3445 unsigned int rx_encap_hdr,
3446 unsigned int rx_l3_class,
3447 unsigned int rx_l4_class,
3448 const efx_qword_t *event)
3449{
3450 struct efx_nic *efx = channel->efx;
Edward Cree69787292017-10-31 14:29:47 +00003451 bool handled = false;
Jon Coopera0ee3542017-02-08 16:50:40 +00003452
3453 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
Edward Cree69787292017-10-31 14:29:47 +00003454 if (!(efx->net_dev->features & NETIF_F_RXALL)) {
3455 if (!efx->loopback_selftest)
3456 channel->n_rx_eth_crc_err += n_packets;
3457 return EFX_RX_PKT_DISCARD;
3458 }
3459 handled = true;
Jon Coopera0ee3542017-02-08 16:50:40 +00003460 }
3461 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
3462 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3463 rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3464 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3465 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3466 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3467 netdev_WARN(efx->net_dev,
3468 "invalid class for RX_IPCKSUM_ERR: event="
3469 EFX_QWORD_FMT "\n",
3470 EFX_QWORD_VAL(*event));
3471 if (!efx->loopback_selftest)
3472 *(rx_encap_hdr ?
3473 &channel->n_rx_outer_ip_hdr_chksum_err :
3474 &channel->n_rx_ip_hdr_chksum_err) += n_packets;
3475 return 0;
3476 }
3477 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
3478 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
3479 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3480 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003481 (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3482 rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
Jon Coopera0ee3542017-02-08 16:50:40 +00003483 netdev_WARN(efx->net_dev,
3484 "invalid class for RX_TCPUDP_CKSUM_ERR: event="
3485 EFX_QWORD_FMT "\n",
3486 EFX_QWORD_VAL(*event));
3487 if (!efx->loopback_selftest)
3488 *(rx_encap_hdr ?
3489 &channel->n_rx_outer_tcp_udp_chksum_err :
3490 &channel->n_rx_tcp_udp_chksum_err) += n_packets;
3491 return 0;
3492 }
3493 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
3494 if (unlikely(!rx_encap_hdr))
3495 netdev_WARN(efx->net_dev,
3496 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
3497 EFX_QWORD_FMT "\n",
3498 EFX_QWORD_VAL(*event));
3499 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3500 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
3501 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
3502 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
3503 netdev_WARN(efx->net_dev,
3504 "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
3505 EFX_QWORD_FMT "\n",
3506 EFX_QWORD_VAL(*event));
3507 if (!efx->loopback_selftest)
3508 channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
3509 return 0;
3510 }
3511 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
3512 if (unlikely(!rx_encap_hdr))
3513 netdev_WARN(efx->net_dev,
3514 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3515 EFX_QWORD_FMT "\n",
3516 EFX_QWORD_VAL(*event));
3517 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
3518 rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003519 (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
3520 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
Jon Coopera0ee3542017-02-08 16:50:40 +00003521 netdev_WARN(efx->net_dev,
3522 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
3523 EFX_QWORD_FMT "\n",
3524 EFX_QWORD_VAL(*event));
3525 if (!efx->loopback_selftest)
3526 channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
3527 return 0;
3528 }
3529
Edward Cree69787292017-10-31 14:29:47 +00003530 WARN_ON(!handled); /* No error bits were recognised */
Jon Coopera0ee3542017-02-08 16:50:40 +00003531 return 0;
3532}
3533
Ben Hutchings8127d662013-08-29 19:19:29 +01003534static int efx_ef10_handle_rx_event(struct efx_channel *channel,
3535 const efx_qword_t *event)
3536{
Jon Coopera0ee3542017-02-08 16:50:40 +00003537 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
3538 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
Ben Hutchings8127d662013-08-29 19:19:29 +01003539 unsigned int n_descs, n_packets, i;
3540 struct efx_nic *efx = channel->efx;
Jon Coopera0ee3542017-02-08 16:50:40 +00003541 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01003542 struct efx_rx_queue *rx_queue;
Jon Coopera0ee3542017-02-08 16:50:40 +00003543 efx_qword_t errors;
Ben Hutchings8127d662013-08-29 19:19:29 +01003544 bool rx_cont;
3545 u16 flags = 0;
3546
Mark Rutland6aa7de02017-10-23 14:07:29 -07003547 if (unlikely(READ_ONCE(efx->reset_pending)))
Ben Hutchings8127d662013-08-29 19:19:29 +01003548 return 0;
3549
3550 /* Basic packet information */
3551 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
3552 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
3553 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
Jon Coopera0ee3542017-02-08 16:50:40 +00003554 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003555 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
Ben Hutchings8127d662013-08-29 19:19:29 +01003556 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
Jon Coopera0ee3542017-02-08 16:50:40 +00003557 rx_encap_hdr =
3558 nic_data->datapath_caps &
3559 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
3560 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
3561 ESE_EZ_ENCAP_HDR_NONE;
Ben Hutchings8127d662013-08-29 19:19:29 +01003562
Ben Hutchings48ce5632013-11-01 16:42:44 +00003563 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
3564 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
3565 EFX_QWORD_FMT "\n",
3566 EFX_QWORD_VAL(*event));
Ben Hutchings8127d662013-08-29 19:19:29 +01003567
3568 rx_queue = efx_channel_get_rx_queue(channel);
3569
3570 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
3571 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
3572
3573 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
3574 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3575
3576 if (n_descs != rx_queue->scatter_n + 1) {
Ben Hutchings92a04162013-09-24 23:21:57 +01003577 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3578
Ben Hutchings8127d662013-08-29 19:19:29 +01003579 /* detect rx abort */
3580 if (unlikely(n_descs == rx_queue->scatter_n)) {
Ben Hutchings48ce5632013-11-01 16:42:44 +00003581 if (rx_queue->scatter_n == 0 || rx_bytes != 0)
3582 netdev_WARN(efx->net_dev,
3583 "invalid RX abort: scatter_n=%u event="
3584 EFX_QWORD_FMT "\n",
3585 rx_queue->scatter_n,
3586 EFX_QWORD_VAL(*event));
Ben Hutchings8127d662013-08-29 19:19:29 +01003587 efx_ef10_handle_rx_abort(rx_queue);
3588 return 0;
3589 }
3590
Ben Hutchings92a04162013-09-24 23:21:57 +01003591 /* Check that RX completion merging is valid, i.e.
3592 * the current firmware supports it and this is a
3593 * non-scattered packet.
3594 */
3595 if (!(nic_data->datapath_caps &
3596 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
3597 rx_queue->scatter_n != 0 || rx_cont) {
Ben Hutchings8127d662013-08-29 19:19:29 +01003598 efx_ef10_handle_rx_bad_lbits(
3599 rx_queue, next_ptr_lbits,
3600 (rx_queue->removed_count +
3601 rx_queue->scatter_n + 1) &
3602 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
3603 return 0;
3604 }
3605
3606 /* Merged completion for multiple non-scattered packets */
3607 rx_queue->scatter_n = 1;
3608 rx_queue->scatter_len = 0;
3609 n_packets = n_descs;
3610 ++channel->n_rx_merge_events;
3611 channel->n_rx_merge_packets += n_packets;
3612 flags |= EFX_RX_PKT_PREFIX_LEN;
3613 } else {
3614 ++rx_queue->scatter_n;
3615 rx_queue->scatter_len += rx_bytes;
3616 if (rx_cont)
3617 return 0;
3618 n_packets = 1;
3619 }
3620
Jon Coopera0ee3542017-02-08 16:50:40 +00003621 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
3622 ESF_DZ_RX_IPCKSUM_ERR, 1,
3623 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
3624 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
3625 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
3626 EFX_AND_QWORD(errors, *event, errors);
3627 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
3628 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
Edward Cree90d2ea92017-02-10 17:34:59 +00003629 rx_encap_hdr,
Jon Coopera0ee3542017-02-08 16:50:40 +00003630 rx_l3_class, rx_l4_class,
Edward Cree90d2ea92017-02-10 17:34:59 +00003631 event);
Jon Coopera0ee3542017-02-08 16:50:40 +00003632 } else {
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003633 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
3634 rx_l4_class == ESE_FZ_L4_CLASS_UDP;
Jon Cooperda50ae22017-02-08 16:51:02 +00003635
3636 switch (rx_encap_hdr) {
3637 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
3638 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
3639 if (tcpudp)
3640 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
3641 break;
3642 case ESE_EZ_ENCAP_HDR_GRE:
3643 case ESE_EZ_ENCAP_HDR_NONE:
3644 if (tcpudp)
3645 flags |= EFX_RX_PKT_CSUMMED;
3646 break;
3647 default:
3648 netdev_WARN(efx->net_dev,
3649 "unknown encapsulation type: event="
3650 EFX_QWORD_FMT "\n",
3651 EFX_QWORD_VAL(*event));
3652 }
Ben Hutchings8127d662013-08-29 19:19:29 +01003653 }
3654
Bert Kenwardd8d8ccf2017-12-18 16:57:18 +00003655 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
Ben Hutchings8127d662013-08-29 19:19:29 +01003656 flags |= EFX_RX_PKT_TCP;
3657
3658 channel->irq_mod_score += 2 * n_packets;
3659
3660 /* Handle received packet(s) */
3661 for (i = 0; i < n_packets; i++) {
3662 efx_rx_packet(rx_queue,
3663 rx_queue->removed_count & rx_queue->ptr_mask,
3664 rx_queue->scatter_n, rx_queue->scatter_len,
3665 flags);
3666 rx_queue->removed_count += rx_queue->scatter_n;
3667 }
3668
3669 rx_queue->scatter_n = 0;
3670 rx_queue->scatter_len = 0;
3671
3672 return n_packets;
3673}
3674
Martin Habetsb9b603d42018-01-25 17:24:43 +00003675static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
3676{
3677 u32 tstamp;
3678
3679 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
3680 tstamp <<= 16;
3681 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
3682
3683 return tstamp;
3684}
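/* Illustrative example (not part of the driver): each timestamp half
 * extracted above is a 32-bit value built from the two 16-bit
 * TSTAMP_DATA_HI/LO fields of one event.  A full transmit timestamp
 * therefore needs both the TSTAMP_LO and TSTAMP_HI events; a purely
 * illustrative way to view the pair as one 64-bit quantity is shown below
 * (the driver itself keeps the halves in
 * tx_queue->completed_timestamp_major/minor, as the handler below shows).
 */
static u64 efx_example_pack_tx_timestamp(u32 major, u32 minor)
{
	return ((u64)major << 32) | minor;
}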
3685
Bert Kenward5227ecc2018-01-25 17:24:20 +00003686static void
Ben Hutchings8127d662013-08-29 19:19:29 +01003687efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
3688{
3689 struct efx_nic *efx = channel->efx;
3690 struct efx_tx_queue *tx_queue;
3691 unsigned int tx_ev_desc_ptr;
3692 unsigned int tx_ev_q_label;
Martin Habetsb9b603d42018-01-25 17:24:43 +00003693 unsigned int tx_ev_type;
3694 u64 ts_part;
Ben Hutchings8127d662013-08-29 19:19:29 +01003695
Mark Rutland6aa7de02017-10-23 14:07:29 -07003696 if (unlikely(READ_ONCE(efx->reset_pending)))
Bert Kenward5227ecc2018-01-25 17:24:20 +00003697 return;
Ben Hutchings8127d662013-08-29 19:19:29 +01003698
3699 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
Bert Kenward5227ecc2018-01-25 17:24:20 +00003700 return;
Ben Hutchings8127d662013-08-29 19:19:29 +01003701
Martin Habetsb9b603d42018-01-25 17:24:43 +00003702 /* Get the transmit queue */
Ben Hutchings8127d662013-08-29 19:19:29 +01003703 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
3704 tx_queue = efx_channel_get_tx_queue(channel,
3705 tx_ev_q_label % EFX_TXQ_TYPES);
Martin Habetsb9b603d42018-01-25 17:24:43 +00003706
3707 if (!tx_queue->timestamping) {
3708 /* Transmit completion */
3709 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
3710 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
3711 return;
3712 }
3713
3714 /* Transmit timestamps are only available for 8XXX series. They result
3715 * in three events per packet. These occur in order, and are:
3716 * - the normal completion event
3717 * - the low part of the timestamp
3718 * - the high part of the timestamp
3719 *
3720 * Each part of the timestamp is itself split across two 16 bit
3721 * fields in the event.
3722 */
3723 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
3724
3725 switch (tx_ev_type) {
3726 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
3727 /* In case of Queue flush or FLR, we might have received
3728 * the previous TX completion event but not the Timestamp
3729 * events.
3730 */
3731 if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
3732 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3733
3734 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
3735 ESF_DZ_TX_DESCR_INDX);
3736 tx_queue->completed_desc_ptr =
3737 tx_ev_desc_ptr & tx_queue->ptr_mask;
3738 break;
3739
3740 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
3741 ts_part = efx_ef10_extract_event_ts(event);
3742 tx_queue->completed_timestamp_minor = ts_part;
3743 break;
3744
3745 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
3746 ts_part = efx_ef10_extract_event_ts(event);
3747 tx_queue->completed_timestamp_major = ts_part;
3748
3749 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
3750 tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
3751 break;
3752
3753 default:
3754 netif_err(efx, hw, efx->net_dev,
3755 "channel %d unknown tx event type %d (data "
3756 EFX_QWORD_FMT ")\n",
3757 channel->channel, tx_ev_type,
3758 EFX_QWORD_VAL(*event));
3759 break;
3760 }
Ben Hutchings8127d662013-08-29 19:19:29 +01003761}
3762
3763static void
3764efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
3765{
3766 struct efx_nic *efx = channel->efx;
3767 int subcode;
3768
3769 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
3770
3771 switch (subcode) {
3772 case ESE_DZ_DRV_TIMER_EV:
3773 case ESE_DZ_DRV_WAKE_UP_EV:
3774 break;
3775 case ESE_DZ_DRV_START_UP_EV:
3776 /* event queue init complete. ok. */
3777 break;
3778 default:
3779 netif_err(efx, hw, efx->net_dev,
3780 "channel %d unknown driver event type %d"
3781 " (data " EFX_QWORD_FMT ")\n",
3782 channel->channel, subcode,
3783 EFX_QWORD_VAL(*event));
3784
3785 }
3786}
3787
3788static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3789 efx_qword_t *event)
3790{
3791 struct efx_nic *efx = channel->efx;
3792 u32 subcode;
3793
3794 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3795
3796 switch (subcode) {
3797 case EFX_EF10_TEST:
3798 channel->event_test_cpu = raw_smp_processor_id();
3799 break;
3800 case EFX_EF10_REFILL:
 3801 /* The queue must be empty, so we won't receive any RX
 3802 * events and efx_process_channel() won't refill the
 3803 * queue; refill it here instead.
3804 */
Jon Coopercce28792013-10-02 11:04:14 +01003805 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
Ben Hutchings8127d662013-08-29 19:19:29 +01003806 break;
3807 default:
3808 netif_err(efx, hw, efx->net_dev,
3809 "channel %d unknown driver event type %u"
3810 " (data " EFX_QWORD_FMT ")\n",
3811 channel->channel, (unsigned) subcode,
3812 EFX_QWORD_VAL(*event));
3813 }
3814}
3815
3816static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3817{
3818 struct efx_nic *efx = channel->efx;
3819 efx_qword_t event, *p_event;
3820 unsigned int read_ptr;
3821 int ev_code;
Ben Hutchings8127d662013-08-29 19:19:29 +01003822 int spent = 0;
3823
Eric W. Biederman75363a42014-03-14 18:11:22 -07003824 if (quota <= 0)
3825 return spent;
3826
Ben Hutchings8127d662013-08-29 19:19:29 +01003827 read_ptr = channel->eventq_read_ptr;
3828
3829 for (;;) {
3830 p_event = efx_event(channel, read_ptr);
3831 event = *p_event;
3832
3833 if (!efx_event_present(&event))
3834 break;
3835
3836 EFX_SET_QWORD(*p_event);
3837
3838 ++read_ptr;
3839
3840 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3841
3842 netif_vdbg(efx, drv, efx->net_dev,
3843 "processing event on %d " EFX_QWORD_FMT "\n",
3844 channel->channel, EFX_QWORD_VAL(event));
3845
3846 switch (ev_code) {
3847 case ESE_DZ_EV_CODE_MCDI_EV:
3848 efx_mcdi_process_event(channel, &event);
3849 break;
3850 case ESE_DZ_EV_CODE_RX_EV:
3851 spent += efx_ef10_handle_rx_event(channel, &event);
3852 if (spent >= quota) {
3853 /* XXX can we split a merged event to
3854 * avoid going over-quota?
3855 */
3856 spent = quota;
3857 goto out;
3858 }
3859 break;
3860 case ESE_DZ_EV_CODE_TX_EV:
Bert Kenward5227ecc2018-01-25 17:24:20 +00003861 efx_ef10_handle_tx_event(channel, &event);
Ben Hutchings8127d662013-08-29 19:19:29 +01003862 break;
3863 case ESE_DZ_EV_CODE_DRIVER_EV:
3864 efx_ef10_handle_driver_event(channel, &event);
3865 if (++spent == quota)
3866 goto out;
3867 break;
3868 case EFX_EF10_DRVGEN_EV:
3869 efx_ef10_handle_driver_generated_event(channel, &event);
3870 break;
3871 default:
3872 netif_err(efx, hw, efx->net_dev,
3873 "channel %d unknown event type %d"
3874 " (data " EFX_QWORD_FMT ")\n",
3875 channel->channel, ev_code,
3876 EFX_QWORD_VAL(event));
3877 }
3878 }
3879
3880out:
3881 channel->eventq_read_ptr = read_ptr;
3882 return spent;
3883}
3884
3885static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3886{
3887 struct efx_nic *efx = channel->efx;
3888 efx_dword_t rptr;
3889
3890 if (EFX_EF10_WORKAROUND_35388(efx)) {
3891 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3892 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3893 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3894 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3895
3896 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3897 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3898 ERF_DD_EVQ_IND_RPTR,
3899 (channel->eventq_read_ptr &
3900 channel->eventq_mask) >>
3901 ERF_DD_EVQ_IND_RPTR_WIDTH);
3902 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3903 channel->channel);
3904 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3905 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3906 ERF_DD_EVQ_IND_RPTR,
3907 channel->eventq_read_ptr &
3908 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3909 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3910 channel->channel);
3911 } else {
3912 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3913 channel->eventq_read_ptr &
3914 channel->eventq_mask);
3915 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3916 }
3917}
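/* Note: with workaround 35388 active the function above writes the read
 * pointer via the indirect ER_DD_EVQ_INDIRECT register in two parts: first
 * the bits above ERF_DD_EVQ_IND_RPTR_WIDTH (FLAGS_HIGH), then the low
 * ERF_DD_EVQ_IND_RPTR_WIDTH bits (FLAGS_LOW).  The BUILD_BUG_ONs bound the
 * event queue size so the pointer always fits in those two fields.
 */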
3918
3919static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3920{
3921 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3922 struct efx_nic *efx = channel->efx;
3923 efx_qword_t event;
3924 int rc;
3925
3926 EFX_POPULATE_QWORD_2(event,
3927 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3928 ESF_DZ_EV_DATA, EFX_EF10_TEST);
3929
3930 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3931
3932 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3933 * already swapped the data to little-endian order.
3934 */
3935 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3936 sizeof(efx_qword_t));
3937
3938 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3939 NULL, 0, NULL);
3940 if (rc != 0)
3941 goto fail;
3942
3943 return;
3944
3945fail:
3946 WARN_ON(true);
3947 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3948}
3949
3950void efx_ef10_handle_drain_event(struct efx_nic *efx)
3951{
3952 if (atomic_dec_and_test(&efx->active_queues))
3953 wake_up(&efx->flush_wq);
3954
3955 WARN_ON(atomic_read(&efx->active_queues) < 0);
3956}
3957
3958static int efx_ef10_fini_dmaq(struct efx_nic *efx)
3959{
3960 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3961 struct efx_channel *channel;
3962 struct efx_tx_queue *tx_queue;
3963 struct efx_rx_queue *rx_queue;
3964 int pending;
3965
3966 /* If the MC has just rebooted, the TX/RX queues will have already been
3967 * torn down, but efx->active_queues needs to be set to zero.
3968 */
3969 if (nic_data->must_realloc_vis) {
3970 atomic_set(&efx->active_queues, 0);
3971 return 0;
3972 }
3973
3974 /* Do not attempt to write to the NIC during EEH recovery */
3975 if (efx->state != STATE_RECOVERY) {
3976 efx_for_each_channel(channel, efx) {
3977 efx_for_each_channel_rx_queue(rx_queue, channel)
3978 efx_ef10_rx_fini(rx_queue);
3979 efx_for_each_channel_tx_queue(tx_queue, channel)
3980 efx_ef10_tx_fini(tx_queue);
3981 }
3982
3983 wait_event_timeout(efx->flush_wq,
3984 atomic_read(&efx->active_queues) == 0,
3985 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
3986 pending = atomic_read(&efx->active_queues);
3987 if (pending) {
3988 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
3989 pending);
3990 return -ETIMEDOUT;
3991 }
3992 }
3993
3994 return 0;
3995}
3996
Edward Creee2835462014-04-16 19:27:48 +01003997static void efx_ef10_prepare_flr(struct efx_nic *efx)
3998{
3999 atomic_set(&efx->active_queues, 0);
4000}
4001
Ben Hutchings8127d662013-08-29 19:19:29 +01004002static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
4003 const struct efx_filter_spec *right)
4004{
4005 if ((left->match_flags ^ right->match_flags) |
4006 ((left->flags ^ right->flags) &
4007 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
4008 return false;
4009
4010 return memcmp(&left->outer_vid, &right->outer_vid,
4011 sizeof(struct efx_filter_spec) -
4012 offsetof(struct efx_filter_spec, outer_vid)) == 0;
4013}
4014
4015static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
4016{
4017 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
4018 return jhash2((const u32 *)&spec->outer_vid,
4019 (sizeof(struct efx_filter_spec) -
4020 offsetof(struct efx_filter_spec, outer_vid)) / 4,
4021 0);
4022 /* XXX should we randomise the initval? */
4023}
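/* Note: efx_ef10_filter_equal() and efx_ef10_filter_hash() above rely on the
 * same layout trick: every match field of efx_filter_spec from outer_vid to
 * the end of the structure is compared (or hashed) as one raw block, keeping
 * the two functions consistent with each other and with any match fields
 * added after outer_vid.  The BUILD_BUG_ON checks that the hashed region
 * starts on a 32-bit boundary, as jhash2() consumes u32 words.
 */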
4024
4025/* Decide whether a filter should be exclusive or else should allow
4026 * delivery to additional recipients. Currently we decide that
4027 * filters for specific local unicast MAC and IP addresses are
4028 * exclusive.
4029 */
4030static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
4031{
4032 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
4033 !is_multicast_ether_addr(spec->loc_mac))
4034 return true;
4035
4036 if ((spec->match_flags &
4037 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
4038 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
4039 if (spec->ether_type == htons(ETH_P_IP) &&
4040 !ipv4_is_multicast(spec->loc_host[0]))
4041 return true;
4042 if (spec->ether_type == htons(ETH_P_IPV6) &&
4043 ((const u8 *)spec->loc_host)[0] != 0xff)
4044 return true;
4045 }
4046
4047 return false;
4048}
4049
4050static struct efx_filter_spec *
4051efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
4052 unsigned int filter_idx)
4053{
4054 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
4055 ~EFX_EF10_FILTER_FLAGS);
4056}
4057
4058static unsigned int
4059efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
4060 unsigned int filter_idx)
4061{
4062 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
4063}
4064
4065static void
4066efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
4067 unsigned int filter_idx,
4068 const struct efx_filter_spec *spec,
4069 unsigned int flags)
4070{
4071 table->entry[filter_idx].spec = (unsigned long)spec | flags;
4072}
4073
Edward Cree9b410802017-01-27 15:02:52 +00004074static void
4075efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
4076 const struct efx_filter_spec *spec,
4077 efx_dword_t *inbuf)
4078{
4079 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
4080 u32 match_fields = 0, uc_match, mc_match;
4081
4082 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4083 efx_ef10_filter_is_exclusive(spec) ?
4084 MC_CMD_FILTER_OP_IN_OP_INSERT :
4085 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
4086
4087 /* Convert match flags and values. Unlike almost
4088 * everything else in MCDI, these fields are in
4089 * network byte order.
4090 */
4091#define COPY_VALUE(value, mcdi_field) \
4092 do { \
4093 match_fields |= \
4094 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
4095 mcdi_field ## _LBN; \
4096 BUILD_BUG_ON( \
4097 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
4098 sizeof(value)); \
4099 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
4100 &value, sizeof(value)); \
4101 } while (0)
4102#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
4103 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
4104 COPY_VALUE(spec->gen_field, mcdi_field); \
4105 }
4106 /* Handle encap filters first. They will always be mismatch
4107 * (unknown UC or MC) filters
4108 */
4109 if (encap_type) {
4110 /* ether_type and outer_ip_proto need to be variables
4111 * because COPY_VALUE wants to memcpy them
4112 */
4113 __be16 ether_type =
4114 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
4115 ETH_P_IPV6 : ETH_P_IP);
4116 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
4117 u8 outer_ip_proto;
4118
4119 switch (encap_type & EFX_ENCAP_TYPES_MASK) {
4120 case EFX_ENCAP_TYPE_VXLAN:
4121 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
4122 /* fallthrough */
4123 case EFX_ENCAP_TYPE_GENEVE:
4124 COPY_VALUE(ether_type, ETHER_TYPE);
4125 outer_ip_proto = IPPROTO_UDP;
4126 COPY_VALUE(outer_ip_proto, IP_PROTO);
4127 /* We always need to set the type field, even
4128 * though we're not matching on the TNI.
4129 */
4130 MCDI_POPULATE_DWORD_1(inbuf,
4131 FILTER_OP_EXT_IN_VNI_OR_VSID,
4132 FILTER_OP_EXT_IN_VNI_TYPE,
4133 vni_type);
4134 break;
4135 case EFX_ENCAP_TYPE_NVGRE:
4136 COPY_VALUE(ether_type, ETHER_TYPE);
4137 outer_ip_proto = IPPROTO_GRE;
4138 COPY_VALUE(outer_ip_proto, IP_PROTO);
4139 break;
4140 default:
4141 WARN_ON(1);
4142 }
4143
4144 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4145 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4146 } else {
4147 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4148 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4149 }
4150
4151 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
4152 match_fields |=
4153 is_multicast_ether_addr(spec->loc_mac) ?
4154 1 << mc_match :
4155 1 << uc_match;
4156 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
4157 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
4158 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
4159 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
4160 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
4161 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
4162 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
4163 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
4164 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
4165 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
4166#undef COPY_FIELD
4167#undef COPY_VALUE
4168 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
4169 match_fields);
4170}
4171
Ben Hutchings8127d662013-08-29 19:19:29 +01004172static void efx_ef10_filter_push_prep(struct efx_nic *efx,
4173 const struct efx_filter_spec *spec,
4174 efx_dword_t *inbuf, u64 handle,
Edward Cree42356d92018-03-08 15:45:17 +00004175 struct efx_rss_context *ctx,
Ben Hutchings8127d662013-08-29 19:19:29 +01004176 bool replacing)
4177{
4178 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooperdcb41232016-04-25 16:51:00 +01004179 u32 flags = spec->flags;
Ben Hutchings8127d662013-08-29 19:19:29 +01004180
Edward Cree9b410802017-01-27 15:02:52 +00004181 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01004182
Edward Cree42356d92018-03-08 15:45:17 +00004183 /* If RSS filter, caller better have given us an RSS context */
4184 if (flags & EFX_FILTER_FLAG_RX_RSS) {
4185 /* We don't have the ability to return an error, so we'll just
4186 * log a warning and disable RSS for the filter.
4187 */
4188 if (WARN_ON_ONCE(!ctx))
4189 flags &= ~EFX_FILTER_FLAG_RX_RSS;
4190 else if (WARN_ON_ONCE(ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID))
4191 flags &= ~EFX_FILTER_FLAG_RX_RSS;
4192 }
Jon Cooperdcb41232016-04-25 16:51:00 +01004193
Ben Hutchings8127d662013-08-29 19:19:29 +01004194 if (replacing) {
4195 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4196 MC_CMD_FILTER_OP_IN_OP_REPLACE);
4197 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
4198 } else {
Edward Cree9b410802017-01-27 15:02:52 +00004199 efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01004200 }
4201
Daniel Pieczko45b24492015-05-06 00:57:14 +01004202 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004203 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
4204 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4205 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
4206 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
Shradha Shahe3d36292015-05-06 00:56:24 +01004207 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01004208 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
4209 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
Ben Hutchingsa0bc3482013-12-16 18:56:24 +00004210 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
4211 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
4212 0 : spec->dmaq_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004213 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
Jon Cooperdcb41232016-04-25 16:51:00 +01004214 (flags & EFX_FILTER_FLAG_RX_RSS) ?
Ben Hutchings8127d662013-08-29 19:19:29 +01004215 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
4216 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
Jon Cooperdcb41232016-04-25 16:51:00 +01004217 if (flags & EFX_FILTER_FLAG_RX_RSS)
Edward Cree42356d92018-03-08 15:45:17 +00004218 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004219}
4220
4221static int efx_ef10_filter_push(struct efx_nic *efx,
Edward Cree42356d92018-03-08 15:45:17 +00004222 const struct efx_filter_spec *spec, u64 *handle,
4223 struct efx_rss_context *ctx, bool replacing)
Ben Hutchings8127d662013-08-29 19:19:29 +01004224{
Edward Cree9b410802017-01-27 15:02:52 +00004225 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
4226 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01004227 int rc;
4228
Edward Cree42356d92018-03-08 15:45:17 +00004229 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
Ben Hutchings8127d662013-08-29 19:19:29 +01004230 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
4231 outbuf, sizeof(outbuf), NULL);
4232 if (rc == 0)
4233 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
Ben Hutchings065e64c2013-10-09 14:17:27 +01004234 if (rc == -ENOSPC)
4235 rc = -EBUSY; /* to match efx_farch_filter_insert() */
Ben Hutchings8127d662013-08-29 19:19:29 +01004236 return rc;
4237}
4238
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004239static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
Ben Hutchings8127d662013-08-29 19:19:29 +01004240{
Edward Cree9b410802017-01-27 15:02:52 +00004241 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004242 unsigned int match_flags = spec->match_flags;
Edward Cree9b410802017-01-27 15:02:52 +00004243 unsigned int uc_match, mc_match;
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004244 u32 mcdi_flags = 0;
4245
Edward Cree9b410802017-01-27 15:02:52 +00004246#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
4247 unsigned int old_match_flags = match_flags; \
4248 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
4249 if (match_flags != old_match_flags) \
4250 mcdi_flags |= \
4251 (1 << ((encap) ? \
4252 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
4253 mcdi_field ## _LBN : \
4254 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
4255 mcdi_field ## _LBN)); \
4256 }
4257 /* inner or outer based on encap type */
4258 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
4259 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
4260 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
4261 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
4262 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
4263 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
4264 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
4265 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
4266 /* always outer */
4267 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
4268 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
4269#undef MAP_FILTER_TO_MCDI_FLAG
4270
4271	/* special handling for encap type, and for the unknown-ucast/mcast-dst (mismatch) flags */
4272 if (encap_type) {
4273 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
4274 mcdi_flags |=
4275 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4276 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4277
4278 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
4279 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
4280 } else {
4281 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
4282 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
4283 }
4284
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004285 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
4286 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
4287 mcdi_flags |=
4288 is_multicast_ether_addr(spec->loc_mac) ?
Edward Cree9b410802017-01-27 15:02:52 +00004289 1 << mc_match :
4290 1 << uc_match;
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004291 }
4292
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004293 /* Did we map them all? */
4294 WARN_ON_ONCE(match_flags);
4295
4296 return mcdi_flags;
4297}
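
/* Worked example (illustrative only): a fully-specified TCP/IPv4 spec with
 * match_flags = LOC_HOST | LOC_PORT | REM_HOST | REM_PORT | ETHER_TYPE |
 * IP_PROTO and no encapsulation maps to the DST_IP, DST_PORT, SRC_IP,
 * SRC_PORT, ETHER_TYPE and IP_PROTO MC_CMD_FILTER_OP_EXT_IN_MATCH_*_LBN
 * bits.  Two specs with the same set of match flags always produce the same
 * MCDI flag word, and so fall into the same rx_match_mcdi_flags class in
 * efx_ef10_filter_pri() below.
 */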
4298
4299static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
4300 const struct efx_filter_spec *spec)
4301{
4302 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
Ben Hutchings8127d662013-08-29 19:19:29 +01004303 unsigned int match_pri;
4304
4305 for (match_pri = 0;
4306 match_pri < table->rx_match_count;
4307 match_pri++)
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004308 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
Ben Hutchings8127d662013-08-29 19:19:29 +01004309 return match_pri;
4310
4311 return -EPROTONOSUPPORT;
4312}
4313
4314static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4315 struct efx_filter_spec *spec,
4316 bool replace_equal)
4317{
Ben Hutchings8127d662013-08-29 19:19:29 +01004318 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
Edward Cree31b84292018-03-27 17:44:21 +01004319 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Edward Creec2bebe32018-03-27 17:42:28 +01004320 struct efx_ef10_filter_table *table;
Ben Hutchings8127d662013-08-29 19:19:29 +01004321 struct efx_filter_spec *saved_spec;
Edward Cree42356d92018-03-08 15:45:17 +00004322 struct efx_rss_context *ctx = NULL;
Ben Hutchings8127d662013-08-29 19:19:29 +01004323 unsigned int match_pri, hash;
4324 unsigned int priv_flags;
Edward Creee0a65e32018-03-27 17:44:36 +01004325 bool rss_locked = false;
Ben Hutchings8127d662013-08-29 19:19:29 +01004326 bool replacing = false;
Edward Creec2bebe32018-03-27 17:42:28 +01004327 unsigned int depth, i;
Ben Hutchings8127d662013-08-29 19:19:29 +01004328 int ins_index = -1;
4329 DEFINE_WAIT(wait);
4330 bool is_mc_recip;
4331 s32 rc;
4332
Edward Creec2bebe32018-03-27 17:42:28 +01004333 down_read(&efx->filter_sem);
4334 table = efx->filter_state;
4335 down_write(&table->lock);
4336
Ben Hutchings8127d662013-08-29 19:19:29 +01004337 /* For now, only support RX filters */
4338 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
Edward Creec2bebe32018-03-27 17:42:28 +01004339 EFX_FILTER_FLAG_RX) {
4340 rc = -EINVAL;
4341 goto out_unlock;
4342 }
Ben Hutchings8127d662013-08-29 19:19:29 +01004343
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004344 rc = efx_ef10_filter_pri(table, spec);
Ben Hutchings8127d662013-08-29 19:19:29 +01004345 if (rc < 0)
Edward Creec2bebe32018-03-27 17:42:28 +01004346 goto out_unlock;
Ben Hutchings8127d662013-08-29 19:19:29 +01004347 match_pri = rc;
4348
4349 hash = efx_ef10_filter_hash(spec);
4350 is_mc_recip = efx_filter_is_mc_recipient(spec);
4351 if (is_mc_recip)
4352 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4353
Edward Cree42356d92018-03-08 15:45:17 +00004354 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
Edward Creee0a65e32018-03-27 17:44:36 +01004355 mutex_lock(&efx->rss_lock);
4356 rss_locked = true;
Edward Cree42356d92018-03-08 15:45:17 +00004357 if (spec->rss_context)
Edward Creee0a65e32018-03-27 17:44:36 +01004358 ctx = efx_find_rss_context_entry(efx, spec->rss_context);
Edward Cree42356d92018-03-08 15:45:17 +00004359 else
4360 ctx = &efx->rss_context;
Edward Creec2bebe32018-03-27 17:42:28 +01004361 if (!ctx) {
4362 rc = -ENOENT;
4363 goto out_unlock;
4364 }
4365 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
4366 rc = -EOPNOTSUPP;
4367 goto out_unlock;
4368 }
Edward Cree42356d92018-03-08 15:45:17 +00004369 }
4370
Ben Hutchings8127d662013-08-29 19:19:29 +01004371 /* Find any existing filters with the same match tuple or
Edward Creec2bebe32018-03-27 17:42:28 +01004372 * else a free slot to insert at.
Ben Hutchings8127d662013-08-29 19:19:29 +01004373 */
Edward Creec2bebe32018-03-27 17:42:28 +01004374 for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4375 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4376 saved_spec = efx_ef10_filter_entry_spec(table, i);
Ben Hutchings8127d662013-08-29 19:19:29 +01004377
Edward Creec2bebe32018-03-27 17:42:28 +01004378 if (!saved_spec) {
4379 if (ins_index < 0)
4380 ins_index = i;
4381 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
4382 if (spec->priority < saved_spec->priority &&
4383 spec->priority != EFX_FILTER_PRI_AUTO) {
4384 rc = -EPERM;
4385 goto out_unlock;
4386 }
4387 if (!is_mc_recip) {
4388 /* This is the only one */
4389 if (spec->priority ==
4390 saved_spec->priority &&
4391 !replace_equal) {
4392 rc = -EEXIST;
4393 goto out_unlock;
4394 }
4395 ins_index = i;
4396 break;
4397 } else if (spec->priority >
4398 saved_spec->priority ||
4399 (spec->priority ==
4400 saved_spec->priority &&
4401 replace_equal)) {
Ben Hutchings8127d662013-08-29 19:19:29 +01004402 if (ins_index < 0)
4403 ins_index = i;
Edward Creec2bebe32018-03-27 17:42:28 +01004404 else
4405 __set_bit(depth, mc_rem_map);
Ben Hutchings8127d662013-08-29 19:19:29 +01004406 }
Ben Hutchings8127d662013-08-29 19:19:29 +01004407 }
Ben Hutchings8127d662013-08-29 19:19:29 +01004408 }
4409
Edward Creec2bebe32018-03-27 17:42:28 +01004410 /* Once we reach the maximum search depth, use the first suitable
4411 * slot, or return -EBUSY if there was none
Ben Hutchings8127d662013-08-29 19:19:29 +01004412 */
Edward Creec2bebe32018-03-27 17:42:28 +01004413 if (ins_index < 0) {
4414 rc = -EBUSY;
4415 goto out_unlock;
4416 }
4417
4418 /* Create a software table entry if necessary. */
Ben Hutchings8127d662013-08-29 19:19:29 +01004419 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
4420 if (saved_spec) {
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004421 if (spec->priority == EFX_FILTER_PRI_AUTO &&
4422 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
Ben Hutchings8127d662013-08-29 19:19:29 +01004423 /* Just make sure it won't be removed */
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004424 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
4425 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01004426 table->entry[ins_index].spec &=
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00004427 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
Ben Hutchings8127d662013-08-29 19:19:29 +01004428 rc = ins_index;
4429 goto out_unlock;
4430 }
4431 replacing = true;
4432 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
4433 } else {
4434 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
4435 if (!saved_spec) {
4436 rc = -ENOMEM;
4437 goto out_unlock;
4438 }
4439 *saved_spec = *spec;
4440 priv_flags = 0;
4441 }
Edward Creec2bebe32018-03-27 17:42:28 +01004442 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
Ben Hutchings8127d662013-08-29 19:19:29 +01004443
Edward Creec2bebe32018-03-27 17:42:28 +01004444 /* Actually insert the filter on the HW */
Ben Hutchings8127d662013-08-29 19:19:29 +01004445 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
Edward Cree42356d92018-03-08 15:45:17 +00004446 ctx, replacing);
Ben Hutchings8127d662013-08-29 19:19:29 +01004447
Edward Cree31b84292018-03-27 17:44:21 +01004448 if (rc == -EINVAL && nic_data->must_realloc_vis)
4449 /* The MC rebooted under us, causing it to reject our filter
4450 * insertion as pointing to an invalid VI (spec->dmaq_id).
4451 */
4452 rc = -EAGAIN;
4453
Ben Hutchings8127d662013-08-29 19:19:29 +01004454 /* Finalise the software table entry */
Ben Hutchings8127d662013-08-29 19:19:29 +01004455 if (rc == 0) {
4456 if (replacing) {
4457 /* Update the fields that may differ */
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004458 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
4459 saved_spec->flags |=
4460 EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01004461 saved_spec->priority = spec->priority;
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004462 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01004463 saved_spec->flags |= spec->flags;
4464 saved_spec->rss_context = spec->rss_context;
4465 saved_spec->dmaq_id = spec->dmaq_id;
4466 }
4467 } else if (!replacing) {
4468 kfree(saved_spec);
4469 saved_spec = NULL;
Edward Creec2bebe32018-03-27 17:42:28 +01004470 } else {
4471 /* We failed to replace, so the old filter is still present.
4472 * Roll back the software table to reflect this. In fact the
4473 * efx_ef10_filter_set_entry() call below will do the right
4474 * thing, so nothing extra is needed here.
4475 */
Ben Hutchings8127d662013-08-29 19:19:29 +01004476 }
4477 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
4478
4479 /* Remove and finalise entries for lower-priority multicast
4480 * recipients
4481 */
4482 if (is_mc_recip) {
Martin Habetsbb53f4d2017-06-22 10:50:41 +01004483 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01004484 unsigned int depth, i;
4485
4486 memset(inbuf, 0, sizeof(inbuf));
4487
4488 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
4489 if (!test_bit(depth, mc_rem_map))
4490 continue;
4491
4492 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
4493 saved_spec = efx_ef10_filter_entry_spec(table, i);
4494 priv_flags = efx_ef10_filter_entry_flags(table, i);
4495
4496 if (rc == 0) {
Ben Hutchings8127d662013-08-29 19:19:29 +01004497 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4498 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4499 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4500 table->entry[i].handle);
4501 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
4502 inbuf, sizeof(inbuf),
4503 NULL, 0, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01004504 }
4505
4506 if (rc == 0) {
4507 kfree(saved_spec);
4508 saved_spec = NULL;
4509 priv_flags = 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01004510 }
4511 efx_ef10_filter_set_entry(table, i, saved_spec,
4512 priv_flags);
4513 }
4514 }
4515
4516 /* If successful, return the inserted filter ID */
4517 if (rc == 0)
Jon Cooper0ccb9982017-02-17 15:49:13 +00004518 rc = efx_ef10_make_filter_id(match_pri, ins_index);
Ben Hutchings8127d662013-08-29 19:19:29 +01004519
Ben Hutchings8127d662013-08-29 19:19:29 +01004520out_unlock:
Edward Creee0a65e32018-03-27 17:44:36 +01004521 if (rss_locked)
4522 mutex_unlock(&efx->rss_lock);
Edward Creec2bebe32018-03-27 17:42:28 +01004523 up_write(&table->lock);
4524 up_read(&efx->filter_sem);
Ben Hutchings8127d662013-08-29 19:19:29 +01004525 return rc;
4526}
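
/* Usage sketch (illustrative, not taken from a real caller): inserting a
 * manual TCP/IPv4 4-tuple steering filter would look roughly like this,
 * assuming the efx_filter_set_ipv4_full() helper from filter.h, with
 * addresses and ports in network byte order:
 *
 *	struct efx_filter_spec spec;
 *	s32 id;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
 *	efx_filter_set_ipv4_full(&spec, IPPROTO_TCP,
 *				 loc_host, loc_port, rem_host, rem_port);
 *	id = efx_ef10_filter_insert(efx, &spec, true);
 *	if (id < 0)
 *		return id;
 *
 * A negative return is one of the errors above (-EBUSY, -EEXIST, -EPERM,
 * ...); a non-negative return is the filter ID to pass back to
 * efx_ef10_filter_remove_safe() or efx_ef10_filter_get_safe().
 */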
4527
Fengguang Wu9fd8095d2013-08-31 06:54:05 +08004528static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01004529{
4530 /* no need to do anything here on EF10 */
4531}
4532
4533/* Remove a filter.
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00004534 * If !by_index, remove by ID
4535 * If by_index, remove by index
Ben Hutchings8127d662013-08-29 19:19:29 +01004536 * Filter ID may come from userland and must be range-checked.
Edward Creec2bebe32018-03-27 17:42:28 +01004537 * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
4538 * for write.
Ben Hutchings8127d662013-08-29 19:19:29 +01004539 */
4540static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
Ben Hutchingsfbd79122013-11-21 19:15:03 +00004541 unsigned int priority_mask,
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00004542 u32 filter_id, bool by_index)
Ben Hutchings8127d662013-08-29 19:19:29 +01004543{
Jon Cooper0ccb9982017-02-17 15:49:13 +00004544 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004545 struct efx_ef10_filter_table *table = efx->filter_state;
4546 MCDI_DECLARE_BUF(inbuf,
4547 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
4548 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
4549 struct efx_filter_spec *spec;
4550 DEFINE_WAIT(wait);
4551 int rc;
4552
Ben Hutchings8127d662013-08-29 19:19:29 +01004553 spec = efx_ef10_filter_entry_spec(table, filter_idx);
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004554 if (!spec ||
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00004555 (!by_index &&
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004556 efx_ef10_filter_pri(table, spec) !=
Edward Creec2bebe32018-03-27 17:42:28 +01004557 efx_ef10_filter_get_unsafe_pri(filter_id)))
4558 return -ENOENT;
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004559
4560 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
Ben Hutchingsfbd79122013-11-21 19:15:03 +00004561 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004562 /* Just remove flags */
4563 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00004564 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
Edward Creec2bebe32018-03-27 17:42:28 +01004565 return 0;
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004566 }
4567
Edward Creec2bebe32018-03-27 17:42:28 +01004568 if (!(priority_mask & (1U << spec->priority)))
4569 return -ENOENT;
Ben Hutchings8127d662013-08-29 19:19:29 +01004570
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004571 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00004572 /* Reset to an automatic filter */
Ben Hutchings8127d662013-08-29 19:19:29 +01004573
4574 struct efx_filter_spec new_spec = *spec;
4575
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004576 new_spec.priority = EFX_FILTER_PRI_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01004577 new_spec.flags = (EFX_FILTER_FLAG_RX |
Edward Cree42356d92018-03-08 15:45:17 +00004578 (efx_rss_active(&efx->rss_context) ?
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00004579 EFX_FILTER_FLAG_RX_RSS : 0));
Ben Hutchings8127d662013-08-29 19:19:29 +01004580 new_spec.dmaq_id = 0;
Edward Cree42356d92018-03-08 15:45:17 +00004581 new_spec.rss_context = 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01004582 rc = efx_ef10_filter_push(efx, &new_spec,
4583 &table->entry[filter_idx].handle,
Edward Cree42356d92018-03-08 15:45:17 +00004584 &efx->rss_context,
Ben Hutchings8127d662013-08-29 19:19:29 +01004585 true);
4586
Ben Hutchings8127d662013-08-29 19:19:29 +01004587 if (rc == 0)
4588 *spec = new_spec;
4589 } else {
4590 /* Really remove the filter */
4591
4592 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
4593 efx_ef10_filter_is_exclusive(spec) ?
4594 MC_CMD_FILTER_OP_IN_OP_REMOVE :
4595 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
4596 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
4597 table->entry[filter_idx].handle);
Bert Kenward105eac62017-02-17 15:50:12 +00004598 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
4599 inbuf, sizeof(inbuf), NULL, 0, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01004600
Bert Kenward105eac62017-02-17 15:50:12 +00004601 if ((rc == 0) || (rc == -ENOENT)) {
4602 /* Filter removed OK or didn't actually exist */
Ben Hutchings8127d662013-08-29 19:19:29 +01004603 kfree(spec);
4604 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
Bert Kenward105eac62017-02-17 15:50:12 +00004605 } else {
4606 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
Martin Habetsbb53f4d2017-06-22 10:50:41 +01004607 MC_CMD_FILTER_OP_EXT_IN_LEN,
Bert Kenward105eac62017-02-17 15:50:12 +00004608 NULL, 0, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01004609 }
4610 }
Ben Hutchings7665d1a2013-11-21 19:02:18 +00004611
Ben Hutchings8127d662013-08-29 19:19:29 +01004612 return rc;
4613}
4614
4615static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
4616 enum efx_filter_priority priority,
4617 u32 filter_id)
4618{
Edward Creec2bebe32018-03-27 17:42:28 +01004619 struct efx_ef10_filter_table *table;
4620 int rc;
4621
4622 down_read(&efx->filter_sem);
4623 table = efx->filter_state;
4624 down_write(&table->lock);
4625 rc = efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
4626 false);
4627 up_write(&table->lock);
4628 up_read(&efx->filter_sem);
4629 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01004630}
4631
Edward Creec2bebe32018-03-27 17:42:28 +01004632/* Caller must hold efx->filter_sem for read */
Edward Cree8c915622016-06-15 17:49:05 +01004633static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
4634 enum efx_filter_priority priority,
4635 u32 filter_id)
Edward Cree12fb0da2015-07-21 15:11:00 +01004636{
Edward Creec2bebe32018-03-27 17:42:28 +01004637 struct efx_ef10_filter_table *table = efx->filter_state;
4638
Edward Cree8c915622016-06-15 17:49:05 +01004639 if (filter_id == EFX_EF10_FILTER_ID_INVALID)
4640 return;
Edward Creec2bebe32018-03-27 17:42:28 +01004641
4642 down_write(&table->lock);
4643 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id,
4644 true);
4645 up_write(&table->lock);
Edward Cree12fb0da2015-07-21 15:11:00 +01004646}
4647
Ben Hutchings8127d662013-08-29 19:19:29 +01004648static int efx_ef10_filter_get_safe(struct efx_nic *efx,
4649 enum efx_filter_priority priority,
4650 u32 filter_id, struct efx_filter_spec *spec)
4651{
Jon Cooper0ccb9982017-02-17 15:49:13 +00004652 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01004653 const struct efx_filter_spec *saved_spec;
Edward Creec2bebe32018-03-27 17:42:28 +01004654 struct efx_ef10_filter_table *table;
Ben Hutchings8127d662013-08-29 19:19:29 +01004655 int rc;
4656
Edward Creec2bebe32018-03-27 17:42:28 +01004657 down_read(&efx->filter_sem);
4658 table = efx->filter_state;
4659 down_read(&table->lock);
Ben Hutchings8127d662013-08-29 19:19:29 +01004660 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
4661 if (saved_spec && saved_spec->priority == priority &&
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004662 efx_ef10_filter_pri(table, saved_spec) ==
Jon Cooper0ccb9982017-02-17 15:49:13 +00004663 efx_ef10_filter_get_unsafe_pri(filter_id)) {
Ben Hutchings8127d662013-08-29 19:19:29 +01004664 *spec = *saved_spec;
4665 rc = 0;
4666 } else {
4667 rc = -ENOENT;
4668 }
Edward Creec2bebe32018-03-27 17:42:28 +01004669 up_read(&table->lock);
4670 up_read(&efx->filter_sem);
Ben Hutchings8127d662013-08-29 19:19:29 +01004671 return rc;
4672}
4673
Ben Hutchingsfbd79122013-11-21 19:15:03 +00004674static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
Edward Creec2bebe32018-03-27 17:42:28 +01004675 enum efx_filter_priority priority)
Ben Hutchings8127d662013-08-29 19:19:29 +01004676{
Edward Creec2bebe32018-03-27 17:42:28 +01004677 struct efx_ef10_filter_table *table;
Ben Hutchingsfbd79122013-11-21 19:15:03 +00004678 unsigned int priority_mask;
4679 unsigned int i;
4680 int rc;
4681
4682 priority_mask = (((1U << (priority + 1)) - 1) &
4683 ~(1U << EFX_FILTER_PRI_AUTO));
4684
Edward Creec2bebe32018-03-27 17:42:28 +01004685 down_read(&efx->filter_sem);
4686 table = efx->filter_state;
4687 down_write(&table->lock);
Ben Hutchingsfbd79122013-11-21 19:15:03 +00004688 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4689 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
4690 i, true);
4691 if (rc && rc != -ENOENT)
Edward Creec2bebe32018-03-27 17:42:28 +01004692 break;
4693 rc = 0;
Ben Hutchingsfbd79122013-11-21 19:15:03 +00004694 }
4695
Edward Creec2bebe32018-03-27 17:42:28 +01004696 up_write(&table->lock);
4697 up_read(&efx->filter_sem);
4698 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01004699}
4700
4701static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
4702 enum efx_filter_priority priority)
4703{
Edward Creec2bebe32018-03-27 17:42:28 +01004704 struct efx_ef10_filter_table *table;
Ben Hutchings8127d662013-08-29 19:19:29 +01004705 unsigned int filter_idx;
4706 s32 count = 0;
4707
Edward Creec2bebe32018-03-27 17:42:28 +01004708 down_read(&efx->filter_sem);
4709 table = efx->filter_state;
4710 down_read(&table->lock);
Ben Hutchings8127d662013-08-29 19:19:29 +01004711 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4712 if (table->entry[filter_idx].spec &&
4713 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
4714 priority)
4715 ++count;
4716 }
Edward Creec2bebe32018-03-27 17:42:28 +01004717 up_read(&table->lock);
4718 up_read(&efx->filter_sem);
Ben Hutchings8127d662013-08-29 19:19:29 +01004719 return count;
4720}
4721
4722static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
4723{
4724 struct efx_ef10_filter_table *table = efx->filter_state;
4725
Jon Cooper0ccb9982017-02-17 15:49:13 +00004726 return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
Ben Hutchings8127d662013-08-29 19:19:29 +01004727}
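
/* The limit above also hints at the filter ID encoding used by
 * efx_ef10_make_filter_id()/efx_ef10_filter_get_unsafe_id() (defined
 * earlier in this file): IDs appear to be laid out as
 * match_pri * HUNT_FILTER_TBL_ROWS * 2 + table_index, so every valid ID is
 * strictly below rx_match_count * HUNT_FILTER_TBL_ROWS * 2.
 */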
4728
4729static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
4730 enum efx_filter_priority priority,
4731 u32 *buf, u32 size)
4732{
Edward Creec2bebe32018-03-27 17:42:28 +01004733 struct efx_ef10_filter_table *table;
Ben Hutchings8127d662013-08-29 19:19:29 +01004734 struct efx_filter_spec *spec;
4735 unsigned int filter_idx;
4736 s32 count = 0;
4737
Edward Creec2bebe32018-03-27 17:42:28 +01004738 down_read(&efx->filter_sem);
4739 table = efx->filter_state;
4740 down_read(&table->lock);
4741
Ben Hutchings8127d662013-08-29 19:19:29 +01004742 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
4743 spec = efx_ef10_filter_entry_spec(table, filter_idx);
4744 if (spec && spec->priority == priority) {
4745 if (count == size) {
4746 count = -EMSGSIZE;
4747 break;
4748 }
Jon Cooper0ccb9982017-02-17 15:49:13 +00004749 buf[count++] =
4750 efx_ef10_make_filter_id(
4751 efx_ef10_filter_pri(table, spec),
Ben Hutchings8127d662013-08-29 19:19:29 +01004752 filter_idx);
4753 }
4754 }
Edward Creec2bebe32018-03-27 17:42:28 +01004755 up_read(&table->lock);
4756 up_read(&efx->filter_sem);
Ben Hutchings8127d662013-08-29 19:19:29 +01004757 return count;
4758}
4759
4760#ifdef CONFIG_RFS_ACCEL
4761
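/* Called from the ARFS expiry scan in the core driver.  Returning true
 * means the scanned slot no longer holds a live hint filter (either it
 * never did, or it has just been removed because rps_may_expire_flow()
 * reported the flow idle); returning false means the filter is still in
 * use and must be kept.
 */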
Ben Hutchings8127d662013-08-29 19:19:29 +01004762static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
4763 unsigned int filter_idx)
4764{
Edward Creec2bebe32018-03-27 17:42:28 +01004765 struct efx_ef10_filter_table *table;
Edward Cree3af0f342018-03-27 17:41:59 +01004766 struct efx_filter_spec *spec;
Edward Creec2bebe32018-03-27 17:42:28 +01004767 bool ret;
Ben Hutchings8127d662013-08-29 19:19:29 +01004768
Edward Creec2bebe32018-03-27 17:42:28 +01004769 down_read(&efx->filter_sem);
4770 table = efx->filter_state;
4771 down_write(&table->lock);
Edward Cree3af0f342018-03-27 17:41:59 +01004772 spec = efx_ef10_filter_entry_spec(table, filter_idx);
Edward Creec2bebe32018-03-27 17:42:28 +01004773
4774 if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
4775 ret = true;
4776 goto out_unlock;
4777 }
4778
Edward Creea7f80182018-04-13 19:17:49 +01004779 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) {
Edward Cree3af0f342018-03-27 17:41:59 +01004780 ret = false;
4781 goto out_unlock;
4782 }
Ben Hutchings8127d662013-08-29 19:19:29 +01004783
Edward Creec2bebe32018-03-27 17:42:28 +01004784 ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
4785 filter_idx, true) == 0;
Edward Cree3af0f342018-03-27 17:41:59 +01004786out_unlock:
Edward Creec2bebe32018-03-27 17:42:28 +01004787 up_write(&table->lock);
4788 up_read(&efx->filter_sem);
Edward Cree3af0f342018-03-27 17:41:59 +01004789 return ret;
Ben Hutchings8127d662013-08-29 19:19:29 +01004790}
4791
Ben Hutchings8127d662013-08-29 19:19:29 +01004792#endif /* CONFIG_RFS_ACCEL */
4793
Edward Cree9b410802017-01-27 15:02:52 +00004794static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
Ben Hutchings8127d662013-08-29 19:19:29 +01004795{
4796 int match_flags = 0;
4797
Edward Cree9b410802017-01-27 15:02:52 +00004798#define MAP_FLAG(gen_flag, mcdi_field) do { \
Ben Hutchings8127d662013-08-29 19:19:29 +01004799 u32 old_mcdi_flags = mcdi_flags; \
Edward Cree9b410802017-01-27 15:02:52 +00004800 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
4801 mcdi_field ## _LBN); \
Ben Hutchings8127d662013-08-29 19:19:29 +01004802 if (mcdi_flags != old_mcdi_flags) \
4803 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
Edward Cree9b410802017-01-27 15:02:52 +00004804 } while (0)
4805
4806 if (encap) {
4807 /* encap filters must specify encap type */
4808 match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
4809 /* and imply ethertype and ip proto */
4810 mcdi_flags &=
4811 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
4812 mcdi_flags &=
4813 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
4814 /* VLAN tags refer to the outer packet */
4815 MAP_FLAG(INNER_VID, INNER_VLAN);
4816 MAP_FLAG(OUTER_VID, OUTER_VLAN);
4817 /* everything else refers to the inner packet */
4818 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
4819 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
4820 MAP_FLAG(REM_HOST, IFRM_SRC_IP);
4821 MAP_FLAG(LOC_HOST, IFRM_DST_IP);
4822 MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
4823 MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
4824 MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
4825 MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
4826 MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
4827 MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
4828 } else {
4829 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
4830 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
4831 MAP_FLAG(REM_HOST, SRC_IP);
4832 MAP_FLAG(LOC_HOST, DST_IP);
4833 MAP_FLAG(REM_MAC, SRC_MAC);
4834 MAP_FLAG(REM_PORT, SRC_PORT);
4835 MAP_FLAG(LOC_MAC, DST_MAC);
4836 MAP_FLAG(LOC_PORT, DST_PORT);
4837 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
4838 MAP_FLAG(INNER_VID, INNER_VLAN);
4839 MAP_FLAG(OUTER_VID, OUTER_VLAN);
4840 MAP_FLAG(IP_PROTO, IP_PROTO);
Ben Hutchings8127d662013-08-29 19:19:29 +01004841 }
Ben Hutchings8127d662013-08-29 19:19:29 +01004842#undef MAP_FLAG
4843
4844 /* Did we map them all? */
4845 if (mcdi_flags)
4846 return -EINVAL;
4847
4848 return match_flags;
4849}
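
/* This is the inverse of efx_ef10_filter_mcdi_flags_from_spec() above: for
 * example, an MCDI match word with only the DST_IP and DST_PORT bits set
 * translates back to EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT.
 * A word containing any bit with no EFX_FILTER_MATCH_* equivalent makes the
 * whole match type unusable and is reported as -EINVAL.
 */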
4850
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01004851static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
4852{
4853 struct efx_ef10_filter_table *table = efx->filter_state;
4854 struct efx_ef10_filter_vlan *vlan, *next_vlan;
4855
4856 /* See comment in efx_ef10_filter_table_remove() */
4857 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4858 return;
4859
4860 if (!table)
4861 return;
4862
4863 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
4864 efx_ef10_filter_del_vlan_internal(efx, vlan);
4865}
4866
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004867static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
Edward Cree9b410802017-01-27 15:02:52 +00004868 bool encap,
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004869 enum efx_filter_match_flags match_flags)
4870{
4871 unsigned int match_pri;
4872 int mf;
4873
4874 for (match_pri = 0;
4875 match_pri < table->rx_match_count;
4876 match_pri++) {
Edward Cree9b410802017-01-27 15:02:52 +00004877 mf = efx_ef10_filter_match_flags_from_mcdi(encap,
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004878 table->rx_match_mcdi_flags[match_pri]);
4879 if (mf == match_flags)
4880 return true;
4881 }
4882
4883 return false;
4884}
4885
Edward Cree9b410802017-01-27 15:02:52 +00004886static int
4887efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
4888 struct efx_ef10_filter_table *table,
4889 bool encap)
Ben Hutchings8127d662013-08-29 19:19:29 +01004890{
4891 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
4892 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
4893 unsigned int pd_match_pri, pd_match_count;
Ben Hutchings8127d662013-08-29 19:19:29 +01004894 size_t outlen;
4895 int rc;
4896
Ben Hutchings8127d662013-08-29 19:19:29 +01004897 /* Find out which RX filter types are supported, and their priorities */
4898 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
Edward Cree9b410802017-01-27 15:02:52 +00004899 encap ?
4900 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
Ben Hutchings8127d662013-08-29 19:19:29 +01004901 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
4902 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
4903 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
4904 &outlen);
4905 if (rc)
Edward Cree9b410802017-01-27 15:02:52 +00004906 return rc;
4907
Ben Hutchings8127d662013-08-29 19:19:29 +01004908 pd_match_count = MCDI_VAR_ARRAY_LEN(
4909 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
Ben Hutchings8127d662013-08-29 19:19:29 +01004910
4911 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
4912 u32 mcdi_flags =
4913 MCDI_ARRAY_DWORD(
4914 outbuf,
4915 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
4916 pd_match_pri);
Edward Cree9b410802017-01-27 15:02:52 +00004917 rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
Ben Hutchings8127d662013-08-29 19:19:29 +01004918 if (rc < 0) {
4919 netif_dbg(efx, probe, efx->net_dev,
4920 "%s: fw flags %#x pri %u not supported in driver\n",
4921 __func__, mcdi_flags, pd_match_pri);
4922 } else {
4923 netif_dbg(efx, probe, efx->net_dev,
4924 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
4925 __func__, mcdi_flags, pd_match_pri,
4926 rc, table->rx_match_count);
Andrew Rybchenko7ac0dd92016-06-15 17:49:30 +01004927 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
4928 table->rx_match_count++;
Ben Hutchings8127d662013-08-29 19:19:29 +01004929 }
4930 }
4931
Edward Cree9b410802017-01-27 15:02:52 +00004932 return 0;
4933}
4934
4935static int efx_ef10_filter_table_probe(struct efx_nic *efx)
4936{
4937 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4938 struct net_device *net_dev = efx->net_dev;
4939 struct efx_ef10_filter_table *table;
4940 struct efx_ef10_vlan *vlan;
4941 int rc;
4942
4943 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
4944 return -EINVAL;
4945
4946 if (efx->filter_state) /* already probed */
4947 return 0;
4948
4949 table = kzalloc(sizeof(*table), GFP_KERNEL);
4950 if (!table)
4951 return -ENOMEM;
4952
4953 table->rx_match_count = 0;
4954 rc = efx_ef10_filter_table_probe_matches(efx, table, false);
4955 if (rc)
4956 goto fail;
4957 if (nic_data->datapath_caps &
4958 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
4959 rc = efx_ef10_filter_table_probe_matches(efx, table, true);
4960 if (rc)
4961 goto fail;
Martin Habetse4478ad2016-06-15 17:51:07 +01004962 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
Edward Cree9b410802017-01-27 15:02:52 +00004963 !(efx_ef10_filter_match_supported(table, false,
Martin Habetse4478ad2016-06-15 17:51:07 +01004964 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
Edward Cree9b410802017-01-27 15:02:52 +00004965 efx_ef10_filter_match_supported(table, false,
Martin Habetse4478ad2016-06-15 17:51:07 +01004966 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
4967 netif_info(efx, probe, net_dev,
4968 "VLAN filters are not supported in this firmware variant\n");
4969 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4970 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4971 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4972 }
4973
Ben Hutchings8127d662013-08-29 19:19:29 +01004974 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
4975 if (!table->entry) {
4976 rc = -ENOMEM;
4977 goto fail;
4978 }
4979
Andrew Rybchenkob071c3a2016-06-15 17:43:00 +01004980 table->mc_promisc_last = false;
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01004981 table->vlan_filter =
4982 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01004983 INIT_LIST_HEAD(&table->vlan_list);
Edward Creec2bebe32018-03-27 17:42:28 +01004984 init_rwsem(&table->lock);
Edward Cree12fb0da2015-07-21 15:11:00 +01004985
Ben Hutchings8127d662013-08-29 19:19:29 +01004986 efx->filter_state = table;
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01004987
4988 list_for_each_entry(vlan, &nic_data->vlan_list, list) {
4989 rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
4990 if (rc)
4991 goto fail_add_vlan;
4992 }
4993
Ben Hutchings8127d662013-08-29 19:19:29 +01004994 return 0;
4995
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01004996fail_add_vlan:
4997 efx_ef10_filter_cleanup_vlans(efx);
4998 efx->filter_state = NULL;
Ben Hutchings8127d662013-08-29 19:19:29 +01004999fail:
5000 kfree(table);
5001 return rc;
5002}
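
/* After a successful probe the table holds the list of match types the
 * firmware supports (rx_match_mcdi_flags/rx_match_count), an empty
 * HUNT_FILTER_TBL_ROWS-entry software shadow of the hardware filter table,
 * and one efx_ef10_filter_vlan entry per VLAN already known to the port.
 * The per-VLAN hardware filters themselves are typically pushed later, when
 * the address lists are synced (see efx_ef10_filter_insert_addr_list() and
 * efx_ef10_filter_insert_def() below).
 */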
5003
Edward Cree0d322412015-05-20 11:10:03 +01005004/* Caller must hold efx->filter_sem for read if a race against
5005 * efx_ef10_filter_table_remove() is possible
5006 */
Ben Hutchings8127d662013-08-29 19:19:29 +01005007static void efx_ef10_filter_table_restore(struct efx_nic *efx)
5008{
5009 struct efx_ef10_filter_table *table = efx->filter_state;
5010 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005011 unsigned int invalid_filters = 0, failed = 0;
5012 struct efx_ef10_filter_vlan *vlan;
Ben Hutchings8127d662013-08-29 19:19:29 +01005013 struct efx_filter_spec *spec;
Edward Cree42356d92018-03-08 15:45:17 +00005014 struct efx_rss_context *ctx;
Ben Hutchings8127d662013-08-29 19:19:29 +01005015 unsigned int filter_idx;
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005016 u32 mcdi_flags;
5017 int match_pri;
Edward Cree9b410802017-01-27 15:02:52 +00005018 int rc, i;
Ben Hutchings8127d662013-08-29 19:19:29 +01005019
Edward Cree0d322412015-05-20 11:10:03 +01005020 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5021
Ben Hutchings8127d662013-08-29 19:19:29 +01005022 if (!nic_data->must_restore_filters)
5023 return;
5024
Edward Cree0d322412015-05-20 11:10:03 +01005025 if (!table)
5026 return;
5027
Edward Creec2bebe32018-03-27 17:42:28 +01005028 down_write(&table->lock);
Edward Creee0a65e32018-03-27 17:44:36 +01005029 mutex_lock(&efx->rss_lock);
Ben Hutchings8127d662013-08-29 19:19:29 +01005030
5031 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5032 spec = efx_ef10_filter_entry_spec(table, filter_idx);
5033 if (!spec)
5034 continue;
5035
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005036 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
5037 match_pri = 0;
5038 while (match_pri < table->rx_match_count &&
5039 table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
5040 ++match_pri;
5041 if (match_pri >= table->rx_match_count) {
5042 invalid_filters++;
5043 goto not_restored;
5044 }
Edward Cree42356d92018-03-08 15:45:17 +00005045 if (spec->rss_context)
Edward Creee0a65e32018-03-27 17:44:36 +01005046 ctx = efx_find_rss_context_entry(efx, spec->rss_context);
Edward Cree42356d92018-03-08 15:45:17 +00005047 else
5048 ctx = &efx->rss_context;
5049 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
5050 if (!ctx) {
5051 netif_warn(efx, drv, efx->net_dev,
5052 "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
5053 spec->rss_context);
5054 invalid_filters++;
5055 goto not_restored;
5056 }
5057 if (ctx->context_id == EFX_EF10_RSS_CONTEXT_INVALID) {
5058 netif_warn(efx, drv, efx->net_dev,
5059 "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
5060 spec->rss_context);
5061 invalid_filters++;
5062 goto not_restored;
5063 }
5064 }
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005065
Ben Hutchings8127d662013-08-29 19:19:29 +01005066 rc = efx_ef10_filter_push(efx, spec,
5067 &table->entry[filter_idx].handle,
Edward Cree42356d92018-03-08 15:45:17 +00005068 ctx, false);
Ben Hutchings8127d662013-08-29 19:19:29 +01005069 if (rc)
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005070 failed++;
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005071
Ben Hutchings8127d662013-08-29 19:19:29 +01005072 if (rc) {
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005073not_restored:
Edward Cree9b410802017-01-27 15:02:52 +00005074 list_for_each_entry(vlan, &table->vlan_list, list)
5075 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
5076 if (vlan->default_filters[i] == filter_idx)
5077 vlan->default_filters[i] =
5078 EFX_EF10_FILTER_ID_INVALID;
5079
Ben Hutchings8127d662013-08-29 19:19:29 +01005080 kfree(spec);
5081 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01005082 }
5083 }
5084
Edward Creee0a65e32018-03-27 17:44:36 +01005085 mutex_unlock(&efx->rss_lock);
Edward Creec2bebe32018-03-27 17:42:28 +01005086 up_write(&table->lock);
Ben Hutchings8127d662013-08-29 19:19:29 +01005087
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005088	/* This can legitimately happen if the MC's capabilities have
5089	 * changed, so it is not an error.
5090 */
5091 if (invalid_filters)
5092 netif_dbg(efx, drv, efx->net_dev,
5093 "Did not restore %u filters that are now unsupported.\n",
5094 invalid_filters);
5095
Ben Hutchings8127d662013-08-29 19:19:29 +01005096 if (failed)
5097 netif_err(efx, hw, efx->net_dev,
Jon Cooper2d3d4ec2017-01-27 15:02:11 +00005098 "unable to restore %u filters\n", failed);
Ben Hutchings8127d662013-08-29 19:19:29 +01005099 else
5100 nic_data->must_restore_filters = false;
5101}
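
/* Restore runs after an MC reboot, while nic_data->must_restore_filters is
 * set: every entry still present in the software shadow table is pushed to
 * the firmware again.  Entries whose match type or RSS context no longer
 * exists cannot be restored and are dropped from the table, while pushes
 * that the firmware rejects are counted and reported as "failed".
 */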
5102
5103static void efx_ef10_filter_table_remove(struct efx_nic *efx)
5104{
5105 struct efx_ef10_filter_table *table = efx->filter_state;
Martin Habetsbb53f4d2017-06-22 10:50:41 +01005106 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
Ben Hutchings8127d662013-08-29 19:19:29 +01005107 struct efx_filter_spec *spec;
5108 unsigned int filter_idx;
5109 int rc;
5110
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005111 efx_ef10_filter_cleanup_vlans(efx);
Edward Cree0d322412015-05-20 11:10:03 +01005112 efx->filter_state = NULL;
Edward Creedd987082016-06-15 17:43:43 +01005113 /* If we were called without locking, then it's not safe to free
5114 * the table as others might be using it. So we just WARN, leak
5115 * the memory, and potentially get an inconsistent filter table
5116 * state.
5117 * This should never actually happen.
5118 */
5119 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5120 return;
5121
Edward Cree0d322412015-05-20 11:10:03 +01005122 if (!table)
5123 return;
5124
Ben Hutchings8127d662013-08-29 19:19:29 +01005125 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
5126 spec = efx_ef10_filter_entry_spec(table, filter_idx);
5127 if (!spec)
5128 continue;
5129
5130 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
5131 efx_ef10_filter_is_exclusive(spec) ?
5132 MC_CMD_FILTER_OP_IN_OP_REMOVE :
5133 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
5134 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
5135 table->entry[filter_idx].handle);
Bert Kenwarde65a5102015-12-23 08:57:36 +00005136 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
5137 sizeof(inbuf), NULL, 0, NULL);
Ben Hutchings48ce5632013-11-01 16:42:44 +00005138 if (rc)
Bert Kenwarde65a5102015-12-23 08:57:36 +00005139 netif_info(efx, drv, efx->net_dev,
5140 "%s: filter %04x remove failed\n",
5141 __func__, filter_idx);
Ben Hutchings8127d662013-08-29 19:19:29 +01005142 kfree(spec);
5143 }
5144
5145 vfree(table->entry);
5146 kfree(table);
5147}
5148
Andrew Rybchenko6a379582016-06-15 17:44:20 +01005149static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
5150{
5151 struct efx_ef10_filter_table *table = efx->filter_state;
5152 unsigned int filter_idx;
5153
Edward Creec2bebe32018-03-27 17:42:28 +01005154 efx_rwsem_assert_write_locked(&table->lock);
5155
Andrew Rybchenko6a379582016-06-15 17:44:20 +01005156 if (*id != EFX_EF10_FILTER_ID_INVALID) {
Jon Cooper0ccb9982017-02-17 15:49:13 +00005157 filter_idx = efx_ef10_filter_get_unsafe_id(*id);
Andrew Rybchenko6a379582016-06-15 17:44:20 +01005158 if (!table->entry[filter_idx].spec)
5159 netif_dbg(efx, drv, efx->net_dev,
5160 "marked null spec old %04x:%04x\n", *id,
5161 filter_idx);
5162 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
5163 *id = EFX_EF10_FILTER_ID_INVALID;
Bert Kenwarde65a5102015-12-23 08:57:36 +00005164 }
Andrew Rybchenko6a379582016-06-15 17:44:20 +01005165}
5166
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005167/* Mark old per-VLAN filters that may need to be removed */
5168static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
5169 struct efx_ef10_filter_vlan *vlan)
Ben Hutchings8127d662013-08-29 19:19:29 +01005170{
5171 struct efx_ef10_filter_table *table = efx->filter_state;
Andrew Rybchenko6a379582016-06-15 17:44:20 +01005172 unsigned int i;
Ben Hutchings8127d662013-08-29 19:19:29 +01005173
Edward Cree12fb0da2015-07-21 15:11:00 +01005174 for (i = 0; i < table->dev_uc_count; i++)
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005175 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
Edward Cree12fb0da2015-07-21 15:11:00 +01005176 for (i = 0; i < table->dev_mc_count; i++)
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005177 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
Edward Cree9b410802017-01-27 15:02:52 +00005178 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5179 efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005180}
5181
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005182/* Mark old filters that may need to be removed.
5183 * Caller must hold efx->filter_sem for read if a race against
5184 * efx_ef10_filter_table_remove() is possible
5185 */
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005186static void efx_ef10_filter_mark_old(struct efx_nic *efx)
5187{
5188 struct efx_ef10_filter_table *table = efx->filter_state;
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005189 struct efx_ef10_filter_vlan *vlan;
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005190
Edward Creec2bebe32018-03-27 17:42:28 +01005191 down_write(&table->lock);
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005192 list_for_each_entry(vlan, &table->vlan_list, list)
5193 _efx_ef10_filter_vlan_mark_old(efx, vlan);
Edward Creec2bebe32018-03-27 17:42:28 +01005194 up_write(&table->lock);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005195}
Ben Hutchings8127d662013-08-29 19:19:29 +01005196
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005197static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005198{
5199 struct efx_ef10_filter_table *table = efx->filter_state;
5200 struct net_device *net_dev = efx->net_dev;
5201 struct netdev_hw_addr *uc;
5202 unsigned int i;
5203
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005204 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005205 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
5206 i = 1;
5207 netdev_for_each_uc_addr(uc, net_dev) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005208 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005209 table->uc_promisc = true;
Edward Cree12fb0da2015-07-21 15:11:00 +01005210 break;
5211 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005212 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
5213 i++;
5214 }
Bert Kenwardc70d6812017-07-12 17:19:41 +01005215
5216 table->dev_uc_count = i;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005217}
5218
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005219static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005220{
5221 struct efx_ef10_filter_table *table = efx->filter_state;
5222 struct net_device *net_dev = efx->net_dev;
5223 struct netdev_hw_addr *mc;
Bert Kenwardc70d6812017-07-12 17:19:41 +01005224 unsigned int i;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005225
Edward Cree148cbab2017-04-04 17:02:49 +01005226 table->mc_overflow = false;
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005227 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005228
Edward Cree12fb0da2015-07-21 15:11:00 +01005229 i = 0;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005230 netdev_for_each_mc_addr(mc, net_dev) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005231 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005232 table->mc_promisc = true;
Edward Cree148cbab2017-04-04 17:02:49 +01005233 table->mc_overflow = true;
Edward Cree12fb0da2015-07-21 15:11:00 +01005234 break;
5235 }
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005236 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
5237 i++;
Ben Hutchings8127d662013-08-29 19:19:29 +01005238 }
Edward Cree12fb0da2015-07-21 15:11:00 +01005239
5240 table->dev_mc_count = i;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005241}
Ben Hutchings8127d662013-08-29 19:19:29 +01005242
Edward Cree12fb0da2015-07-21 15:11:00 +01005243static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005244 struct efx_ef10_filter_vlan *vlan,
5245 bool multicast, bool rollback)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005246{
5247 struct efx_ef10_filter_table *table = efx->filter_state;
5248 struct efx_ef10_dev_addr *addr_list;
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005249 enum efx_filter_flags filter_flags;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005250 struct efx_filter_spec spec;
Edward Cree12fb0da2015-07-21 15:11:00 +01005251 u8 baddr[ETH_ALEN];
5252 unsigned int i, j;
5253 int addr_count;
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005254 u16 *ids;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005255 int rc;
5256
5257 if (multicast) {
5258 addr_list = table->dev_mc_list;
Edward Cree12fb0da2015-07-21 15:11:00 +01005259 addr_count = table->dev_mc_count;
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005260 ids = vlan->mc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005261 } else {
5262 addr_list = table->dev_uc_list;
Edward Cree12fb0da2015-07-21 15:11:00 +01005263 addr_count = table->dev_uc_count;
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005264 ids = vlan->uc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005265 }
5266
Bert Kenward89bda972018-04-17 13:32:39 +01005267 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005268
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005269 /* Insert/renew filters */
Edward Cree12fb0da2015-07-21 15:11:00 +01005270 for (i = 0; i < addr_count; i++) {
Edward Creed58299a2017-06-29 16:50:06 +01005271 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005272 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005273 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
Jon Cooperb6f568e2015-07-21 15:10:15 +01005274 rc = efx_ef10_filter_insert(efx, &spec, true);
5275 if (rc < 0) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005276 if (rollback) {
5277 netif_info(efx, drv, efx->net_dev,
5278 "efx_ef10_filter_insert failed rc=%d\n",
5279 rc);
5280 /* Fall back to promiscuous */
5281 for (j = 0; j < i; j++) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005282 efx_ef10_filter_remove_unsafe(
5283 efx, EFX_FILTER_PRI_AUTO,
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005284 ids[j]);
5285 ids[j] = EFX_EF10_FILTER_ID_INVALID;
Edward Cree12fb0da2015-07-21 15:11:00 +01005286 }
5287 return rc;
5288 } else {
Edward Creed58299a2017-06-29 16:50:06 +01005289 /* keep invalid ID, and carry on */
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005290 }
Edward Creed58299a2017-06-29 16:50:06 +01005291 } else {
5292 ids[i] = efx_ef10_filter_get_unsafe_id(rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01005293 }
5294 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005295
Edward Cree12fb0da2015-07-21 15:11:00 +01005296 if (multicast && rollback) {
5297 /* Also need an Ethernet broadcast filter */
Edward Cree9b410802017-01-27 15:02:52 +00005298 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
5299 EFX_EF10_FILTER_ID_INVALID);
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005300 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Edward Cree12fb0da2015-07-21 15:11:00 +01005301 eth_broadcast_addr(baddr);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005302 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005303 rc = efx_ef10_filter_insert(efx, &spec, true);
Edward Cree12fb0da2015-07-21 15:11:00 +01005304 if (rc < 0) {
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005305 netif_warn(efx, drv, efx->net_dev,
Edward Cree12fb0da2015-07-21 15:11:00 +01005306 "Broadcast filter insert failed rc=%d\n", rc);
5307 /* Fall back to promiscuous */
5308 for (j = 0; j < i; j++) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005309 efx_ef10_filter_remove_unsafe(
5310 efx, EFX_FILTER_PRI_AUTO,
Andrew Rybchenkodc3273e2016-06-15 17:45:36 +01005311 ids[j]);
5312 ids[j] = EFX_EF10_FILTER_ID_INVALID;
Edward Cree12fb0da2015-07-21 15:11:00 +01005313 }
5314 return rc;
5315 } else {
Edward Cree9b410802017-01-27 15:02:52 +00005316 vlan->default_filters[EFX_EF10_BCAST] =
Jon Cooper0ccb9982017-02-17 15:49:13 +00005317 efx_ef10_filter_get_unsafe_id(rc);
Edward Cree12fb0da2015-07-21 15:11:00 +01005318 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005319 }
Edward Cree12fb0da2015-07-21 15:11:00 +01005320
5321 return 0;
5322}
5323
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005324static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5325 struct efx_ef10_filter_vlan *vlan,
Edward Cree9b410802017-01-27 15:02:52 +00005326 enum efx_encap_type encap_type,
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005327 bool multicast, bool rollback)
Edward Cree12fb0da2015-07-21 15:11:00 +01005328{
Edward Cree12fb0da2015-07-21 15:11:00 +01005329 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005330 enum efx_filter_flags filter_flags;
Edward Cree12fb0da2015-07-21 15:11:00 +01005331 struct efx_filter_spec spec;
5332 u8 baddr[ETH_ALEN];
5333 int rc;
Edward Cree9b410802017-01-27 15:02:52 +00005334 u16 *id;
Edward Cree12fb0da2015-07-21 15:11:00 +01005335
Bert Kenward89bda972018-04-17 13:32:39 +01005336 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005337
5338 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Edward Cree12fb0da2015-07-21 15:11:00 +01005339
5340 if (multicast)
5341 efx_filter_set_mc_def(&spec);
5342 else
5343 efx_filter_set_uc_def(&spec);
5344
Edward Cree9b410802017-01-27 15:02:52 +00005345 if (encap_type) {
5346 if (nic_data->datapath_caps &
5347 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
5348 efx_filter_set_encap_type(&spec, encap_type);
5349 else
5350 /* don't insert encap filters on non-supporting
5351 * platforms. ID will be left as INVALID.
5352 */
5353 return 0;
5354 }
5355
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005356 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
5357 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
5358
Edward Cree12fb0da2015-07-21 15:11:00 +01005359 rc = efx_ef10_filter_insert(efx, &spec, true);
5360 if (rc < 0) {
Edward Cree9b410802017-01-27 15:02:52 +00005361 const char *um = multicast ? "Multicast" : "Unicast";
5362 const char *encap_name = "";
5363 const char *encap_ipv = "";
5364
5365 if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5366 EFX_ENCAP_TYPE_VXLAN)
5367 encap_name = "VXLAN ";
5368 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5369 EFX_ENCAP_TYPE_NVGRE)
5370 encap_name = "NVGRE ";
5371 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
5372 EFX_ENCAP_TYPE_GENEVE)
5373 encap_name = "GENEVE ";
5374 if (encap_type & EFX_ENCAP_FLAG_IPV6)
5375 encap_ipv = "IPv6 ";
5376 else if (encap_type)
5377 encap_ipv = "IPv4 ";
5378
5379 /* unprivileged functions can't insert mismatch filters
5380 * for encapsulated or unicast traffic, so downgrade
5381 * those warnings to debug.
5382 */
Jon Cooper34e7aef2017-01-27 15:02:39 +00005383 netif_cond_dbg(efx, drv, efx->net_dev,
Edward Cree9b410802017-01-27 15:02:52 +00005384 rc == -EPERM && (encap_type || !multicast), warn,
5385 "%s%s%s mismatch filter insert failed rc=%d\n",
5386 encap_name, encap_ipv, um, rc);
Edward Cree12fb0da2015-07-21 15:11:00 +01005387 } else if (multicast) {
Edward Cree9b410802017-01-27 15:02:52 +00005388 /* mapping from encap types to default filter IDs (multicast) */
5389 static enum efx_ef10_default_filters map[] = {
5390 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
5391 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
5392 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
5393 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
5394 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5395 EFX_EF10_VXLAN6_MCDEF,
5396 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5397 EFX_EF10_NVGRE6_MCDEF,
5398 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5399 EFX_EF10_GENEVE6_MCDEF,
5400 };
5401
5402 /* quick bounds check (BCAST result impossible) */
5403 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
Colin Ian Kinge9904992017-01-31 16:30:02 +00005404 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
Edward Cree9b410802017-01-27 15:02:52 +00005405 WARN_ON(1);
5406 return -EINVAL;
5407 }
5408 /* then follow map */
5409 id = &vlan->default_filters[map[encap_type]];
5410
5411 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
Jon Cooper0ccb9982017-02-17 15:49:13 +00005412 *id = efx_ef10_filter_get_unsafe_id(rc);
Edward Cree9b410802017-01-27 15:02:52 +00005413 if (!nic_data->workaround_26807 && !encap_type) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005414 /* Also need an Ethernet broadcast filter */
5415 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00005416 filter_flags, 0);
Edward Cree12fb0da2015-07-21 15:11:00 +01005417 eth_broadcast_addr(baddr);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005418 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
Edward Cree12fb0da2015-07-21 15:11:00 +01005419 rc = efx_ef10_filter_insert(efx, &spec, true);
5420 if (rc < 0) {
5421 netif_warn(efx, drv, efx->net_dev,
5422 "Broadcast filter insert failed rc=%d\n",
5423 rc);
5424 if (rollback) {
5425 /* Roll back the mc_def filter */
5426 efx_ef10_filter_remove_unsafe(
5427 efx, EFX_FILTER_PRI_AUTO,
Edward Cree9b410802017-01-27 15:02:52 +00005428 *id);
5429 *id = EFX_EF10_FILTER_ID_INVALID;
Edward Cree12fb0da2015-07-21 15:11:00 +01005430 return rc;
5431 }
5432 } else {
Edward Cree9b410802017-01-27 15:02:52 +00005433 EFX_WARN_ON_PARANOID(
5434 vlan->default_filters[EFX_EF10_BCAST] !=
5435 EFX_EF10_FILTER_ID_INVALID);
5436 vlan->default_filters[EFX_EF10_BCAST] =
Jon Cooper0ccb9982017-02-17 15:49:13 +00005437 efx_ef10_filter_get_unsafe_id(rc);
Edward Cree12fb0da2015-07-21 15:11:00 +01005438 }
5439 }
5440 rc = 0;
5441 } else {
Edward Cree9b410802017-01-27 15:02:52 +00005442 /* mapping from encap types to default filter IDs (unicast) */
5443 static enum efx_ef10_default_filters map[] = {
5444 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
5445 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
5446 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
5447 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
5448 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
5449 EFX_EF10_VXLAN6_UCDEF,
5450 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
5451 EFX_EF10_NVGRE6_UCDEF,
5452 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
5453 EFX_EF10_GENEVE6_UCDEF,
5454 };
5455
5456 /* quick bounds check (BCAST result impossible) */
5457 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
Dan Carpenteree467fb2017-02-07 10:44:31 +03005458 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
Edward Cree9b410802017-01-27 15:02:52 +00005459 WARN_ON(1);
5460 return -EINVAL;
5461 }
5462 /* then follow map */
5463 id = &vlan->default_filters[map[encap_type]];
5464 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
5465		*id = efx_ef10_filter_get_unsafe_id(rc);
Edward Cree12fb0da2015-07-21 15:11:00 +01005466 rc = 0;
5467 }
5468 return rc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005469}
5470
Edward Creec2bebe32018-03-27 17:42:28 +01005471/* Remove filters that weren't renewed. */
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005472static void efx_ef10_filter_remove_old(struct efx_nic *efx)
5473{
5474 struct efx_ef10_filter_table *table = efx->filter_state;
Bert Kenwarde65a5102015-12-23 08:57:36 +00005475 int remove_failed = 0;
5476 int remove_noent = 0;
5477 int rc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005478 int i;
5479
Edward Creec2bebe32018-03-27 17:42:28 +01005480 down_write(&table->lock);
Ben Hutchings8127d662013-08-29 19:19:29 +01005481 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
Mark Rutland6aa7de02017-10-23 14:07:29 -07005482 if (READ_ONCE(table->entry[i].spec) &
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00005483 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
Bert Kenwarde65a5102015-12-23 08:57:36 +00005484 rc = efx_ef10_filter_remove_internal(efx,
5485 1U << EFX_FILTER_PRI_AUTO, i, true);
5486 if (rc == -ENOENT)
5487 remove_noent++;
5488 else if (rc)
5489 remove_failed++;
Ben Hutchings8127d662013-08-29 19:19:29 +01005490 }
5491 }
Edward Creec2bebe32018-03-27 17:42:28 +01005492 up_write(&table->lock);
Bert Kenwarde65a5102015-12-23 08:57:36 +00005493
5494 if (remove_failed)
5495 netif_info(efx, drv, efx->net_dev,
5496 "%s: failed to remove %d filters\n",
5497 __func__, remove_failed);
5498 if (remove_noent)
5499 netif_info(efx, drv, efx->net_dev,
5500 "%s: failed to remove %d non-existent filters\n",
5501 __func__, remove_noent);
Ben Hutchings8127d662013-08-29 19:19:29 +01005502}
5503
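/* Fallback path for changing the MAC address when MC_CMD_VADAPTOR_SET_MAC is
 * not available (see efx_ef10_set_mac_address() below): detach the device,
 * tear down the filter table and vadaptor, swap the MAC address on the
 * PF-created vport, then restore everything.  If the restore fails, a
 * DATAPATH reset is scheduled rather than leaving the NIC half-configured.
 */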
Daniel Pieczko7a186f42015-07-07 11:37:19 +01005504static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
5505{
5506 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5507 u8 mac_old[ETH_ALEN];
5508 int rc, rc2;
5509
5510 /* Only reconfigure a PF-created vport */
5511 if (is_zero_ether_addr(nic_data->vport_mac))
5512 return 0;
5513
5514 efx_device_detach_sync(efx);
5515 efx_net_stop(efx->net_dev);
5516 down_write(&efx->filter_sem);
5517 efx_ef10_filter_table_remove(efx);
5518 up_write(&efx->filter_sem);
5519
5520 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
5521 if (rc)
5522 goto restore_filters;
5523
5524 ether_addr_copy(mac_old, nic_data->vport_mac);
5525 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
5526 nic_data->vport_mac);
5527 if (rc)
5528 goto restore_vadaptor;
5529
5530 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
5531 efx->net_dev->dev_addr);
5532 if (!rc) {
5533 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
5534 } else {
5535 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
5536 if (rc2) {
5537 /* Failed to add original MAC, so clear vport_mac */
5538 eth_zero_addr(nic_data->vport_mac);
5539 goto reset_nic;
5540 }
5541 }
5542
5543restore_vadaptor:
5544 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
5545 if (rc2)
5546 goto reset_nic;
5547restore_filters:
5548 down_write(&efx->filter_sem);
5549 rc2 = efx_ef10_filter_table_probe(efx);
5550 up_write(&efx->filter_sem);
5551 if (rc2)
5552 goto reset_nic;
5553
5554 rc2 = efx_net_open(efx->net_dev);
5555 if (rc2)
5556 goto reset_nic;
5557
Peter Dunning9c568fd2017-02-17 15:50:43 +00005558 efx_device_attach_if_not_resetting(efx);
Daniel Pieczko7a186f42015-07-07 11:37:19 +01005559
5560 return rc;
5561
5562reset_nic:
5563 netif_err(efx, drv, efx->net_dev,
5564 "Failed to restore when changing MAC address - scheduling reset\n");
5565 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
5566
5567 return rc ? rc : rc2;
5568}
5569
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005570/* Caller must hold efx->filter_sem for read if a race against
5571 * efx_ef10_filter_table_remove() is possible
5572 */
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005573static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
5574 struct efx_ef10_filter_vlan *vlan)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005575{
5576 struct efx_ef10_filter_table *table = efx->filter_state;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005577 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005578
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01005579	/* Do not install the unspecified-VID filter if VLAN filtering is enabled.
5580	 * Do not install specific-VID filters if VLAN filtering is disabled.
5581 */
5582 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
5583 return;
5584
Edward Cree12fb0da2015-07-21 15:11:00 +01005585 /* Insert/renew unicast filters */
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005586 if (table->uc_promisc) {
Edward Cree9b410802017-01-27 15:02:52 +00005587 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
5588 false, false);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005589 efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005590 } else {
5591 /* If any of the filters failed to insert, fall back to
5592 * promiscuous mode - add in the uc_def filter. But keep
5593 * our individual unicast filters.
5594 */
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005595 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
Edward Cree9b410802017-01-27 15:02:52 +00005596 efx_ef10_filter_insert_def(efx, vlan,
5597 EFX_ENCAP_TYPE_NONE,
5598 false, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005599 }
Edward Cree9b410802017-01-27 15:02:52 +00005600 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5601 false, false);
5602 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5603 EFX_ENCAP_FLAG_IPV6,
5604 false, false);
5605 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5606 false, false);
5607 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5608 EFX_ENCAP_FLAG_IPV6,
5609 false, false);
5610 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5611 false, false);
5612 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5613 EFX_ENCAP_FLAG_IPV6,
5614 false, false);
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005615
Edward Cree12fb0da2015-07-21 15:11:00 +01005616 /* Insert/renew multicast filters */
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005617 /* If changing promiscuous state with cascaded multicast filters, remove
5618 * old filters first, so that packets are dropped rather than duplicated
5619 */
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005620 if (nic_data->workaround_26807 &&
5621 table->mc_promisc_last != table->mc_promisc)
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01005622 efx_ef10_filter_remove_old(efx);
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005623 if (table->mc_promisc) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005624 if (nic_data->workaround_26807) {
5625			/* If we failed to insert promiscuous filters, roll back
5626 * and fall back to individual multicast filters
5627 */
Edward Cree9b410802017-01-27 15:02:52 +00005628 if (efx_ef10_filter_insert_def(efx, vlan,
5629 EFX_ENCAP_TYPE_NONE,
5630 true, true)) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005631 /* Changing promisc state, so remove old filters */
5632 efx_ef10_filter_remove_old(efx);
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005633 efx_ef10_filter_insert_addr_list(efx, vlan,
5634 true, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005635 }
5636 } else {
5637 /* If we failed to insert promiscuous filters, don't
Edward Cree148cbab2017-04-04 17:02:49 +01005638			 * roll back. Regardless, also insert the mc_list,
5639 * unless it's incomplete due to overflow
Edward Cree12fb0da2015-07-21 15:11:00 +01005640 */
Edward Cree9b410802017-01-27 15:02:52 +00005641 efx_ef10_filter_insert_def(efx, vlan,
5642 EFX_ENCAP_TYPE_NONE,
5643 true, false);
Edward Cree148cbab2017-04-04 17:02:49 +01005644 if (!table->mc_overflow)
5645 efx_ef10_filter_insert_addr_list(efx, vlan,
5646 true, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005647 }
5648 } else {
5649		/* If any filters failed to insert, roll back and fall back to
5650 * promiscuous mode - mc_def filter and maybe broadcast. If
5651 * that fails, roll back again and insert as many of our
5652 * individual multicast filters as we can.
5653 */
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005654 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
Edward Cree12fb0da2015-07-21 15:11:00 +01005655 /* Changing promisc state, so remove old filters */
5656 if (nic_data->workaround_26807)
5657 efx_ef10_filter_remove_old(efx);
Edward Cree9b410802017-01-27 15:02:52 +00005658 if (efx_ef10_filter_insert_def(efx, vlan,
5659 EFX_ENCAP_TYPE_NONE,
5660 true, true))
Andrew Rybchenkob3a3c032016-06-15 17:47:36 +01005661 efx_ef10_filter_insert_addr_list(efx, vlan,
5662 true, false);
Edward Cree12fb0da2015-07-21 15:11:00 +01005663 }
5664 }
Edward Cree9b410802017-01-27 15:02:52 +00005665 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
5666 true, false);
5667 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
5668 EFX_ENCAP_FLAG_IPV6,
5669 true, false);
5670 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
5671 true, false);
5672 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
5673 EFX_ENCAP_FLAG_IPV6,
5674 true, false);
5675 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
5676 true, false);
5677 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
5678 EFX_ENCAP_FLAG_IPV6,
5679 true, false);
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005680}
5681
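/* Bring the hardware filter table in line with the net_device address lists
 * and flags.  The pattern is mark-and-sweep: efx_ef10_filter_mark_old() tags
 * the existing automatic filters, the per-VLAN insertions below renew the
 * ones still wanted, and efx_ef10_filter_remove_old() drops whatever was not
 * renewed.
 */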
5682/* Caller must hold efx->filter_sem for read if a race against
5683 * efx_ef10_filter_table_remove() is possible
5684 */
5685static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
5686{
5687 struct efx_ef10_filter_table *table = efx->filter_state;
5688 struct net_device *net_dev = efx->net_dev;
5689 struct efx_ef10_filter_vlan *vlan;
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01005690 bool vlan_filter;
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005691
5692 if (!efx_dev_registered(efx))
5693 return;
5694
5695 if (!table)
5696 return;
5697
5698 efx_ef10_filter_mark_old(efx);
5699
5700 /* Copy/convert the address lists; add the primary station
5701 * address and broadcast address
5702 */
5703 netif_addr_lock_bh(net_dev);
5704 efx_ef10_filter_uc_addr_list(efx);
5705 efx_ef10_filter_mc_addr_list(efx);
5706 netif_addr_unlock_bh(net_dev);
5707
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01005708 /* If VLAN filtering changes, all old filters are finally removed.
5709 * Do it in advance to avoid conflicts for unicast untagged and
5710 * VLAN 0 tagged filters.
5711 */
5712 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
5713 if (table->vlan_filter != vlan_filter) {
5714 table->vlan_filter = vlan_filter;
5715 efx_ef10_filter_remove_old(efx);
5716 }
5717
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005718 list_for_each_entry(vlan, &table->vlan_list, list)
5719 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005720
5721 efx_ef10_filter_remove_old(efx);
Andrew Rybchenkoafa4ce12016-06-15 17:45:56 +01005722 table->mc_promisc_last = table->mc_promisc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01005723}
5724
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005725static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
5726{
5727 struct efx_ef10_filter_table *table = efx->filter_state;
5728 struct efx_ef10_filter_vlan *vlan;
5729
5730 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
5731
5732 list_for_each_entry(vlan, &table->vlan_list, list) {
5733 if (vlan->vid == vid)
5734 return vlan;
5735 }
5736
5737 return NULL;
5738}
5739
5740static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
5741{
5742 struct efx_ef10_filter_table *table = efx->filter_state;
5743 struct efx_ef10_filter_vlan *vlan;
5744 unsigned int i;
5745
5746 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5747 return -EINVAL;
5748
5749 vlan = efx_ef10_filter_find_vlan(efx, vid);
5750 if (WARN_ON(vlan)) {
5751 netif_err(efx, drv, efx->net_dev,
5752 "VLAN %u already added\n", vid);
5753 return -EALREADY;
5754 }
5755
5756 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
5757 if (!vlan)
5758 return -ENOMEM;
5759
5760 vlan->vid = vid;
5761
5762 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
5763 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
5764 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
5765 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
Edward Cree9b410802017-01-27 15:02:52 +00005766 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5767 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005768
5769 list_add_tail(&vlan->list, &table->vlan_list);
5770
5771 if (efx_dev_registered(efx))
5772 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
5773
5774 return 0;
5775}
5776
5777static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
5778 struct efx_ef10_filter_vlan *vlan)
5779{
5780 unsigned int i;
5781
5782 /* See comment in efx_ef10_filter_table_remove() */
5783 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5784 return;
5785
5786 list_del(&vlan->list);
5787
Edward Cree8c915622016-06-15 17:49:05 +01005788 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005789 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
Edward Cree8c915622016-06-15 17:49:05 +01005790 vlan->uc[i]);
5791 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005792 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
Edward Cree8c915622016-06-15 17:49:05 +01005793 vlan->mc[i]);
Edward Cree9b410802017-01-27 15:02:52 +00005794 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
5795 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
5796 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
5797 vlan->default_filters[i]);
Andrew Rybchenko34813fe2016-06-15 17:48:14 +01005798
5799 kfree(vlan);
5800}
5801
5802static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
5803{
5804 struct efx_ef10_filter_vlan *vlan;
5805
5806 /* See comment in efx_ef10_filter_table_remove() */
5807 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
5808 return;
5809
5810 vlan = efx_ef10_filter_find_vlan(efx, vid);
5811 if (!vlan) {
5812 netif_err(efx, drv, efx->net_dev,
5813 "VLAN %u not found in filter state\n", vid);
5814 return;
5815 }
5816
5817 efx_ef10_filter_del_vlan_internal(efx, vlan);
5818}
5819
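/* Change the unicast MAC address via MC_CMD_VADAPTOR_SET_MAC, removing and
 * re-probing the filter table around the call.  A VF that gets -EPERM asks
 * its parent PF to make the change on its behalf; a PF whose firmware lacks
 * the command (-ENOSYS) falls back to efx_ef10_vport_set_mac_address() above.
 */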
Shradha Shah910c8782015-05-20 11:12:48 +01005820static int efx_ef10_set_mac_address(struct efx_nic *efx)
5821{
5822 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
5823 struct efx_ef10_nic_data *nic_data = efx->nic_data;
5824 bool was_enabled = efx->port_enabled;
5825 int rc;
5826
5827 efx_device_detach_sync(efx);
5828 efx_net_stop(efx->net_dev);
Martin Habetsd2489532016-06-15 17:48:49 +01005829
5830 mutex_lock(&efx->mac_lock);
Shradha Shah910c8782015-05-20 11:12:48 +01005831 down_write(&efx->filter_sem);
5832 efx_ef10_filter_table_remove(efx);
5833
5834 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
5835 efx->net_dev->dev_addr);
5836 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
5837 nic_data->vport_id);
Daniel Pieczko535a6172015-07-07 11:37:33 +01005838 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
5839 sizeof(inbuf), NULL, 0, NULL);
Shradha Shah910c8782015-05-20 11:12:48 +01005840
5841 efx_ef10_filter_table_probe(efx);
5842 up_write(&efx->filter_sem);
Martin Habetsd2489532016-06-15 17:48:49 +01005843 mutex_unlock(&efx->mac_lock);
5844
Shradha Shah910c8782015-05-20 11:12:48 +01005845 if (was_enabled)
5846 efx_net_open(efx->net_dev);
Peter Dunning9c568fd2017-02-17 15:50:43 +00005847 efx_device_attach_if_not_resetting(efx);
Shradha Shah910c8782015-05-20 11:12:48 +01005848
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005849#ifdef CONFIG_SFC_SRIOV
5850 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
Shradha Shah910c8782015-05-20 11:12:48 +01005851 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
5852
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005853 if (rc == -EPERM) {
5854 struct efx_nic *efx_pf;
Shradha Shah910c8782015-05-20 11:12:48 +01005855
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005856 /* Switch to PF and change MAC address on vport */
5857 efx_pf = pci_get_drvdata(pci_dev_pf);
5858
5859 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
Shradha Shah910c8782015-05-20 11:12:48 +01005860 nic_data->vf_index,
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005861 efx->net_dev->dev_addr);
5862 } else if (!rc) {
Shradha Shah910c8782015-05-20 11:12:48 +01005863 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
5864 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
5865 unsigned int i;
5866
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005867 /* MAC address successfully changed by VF (with MAC
5868 * spoofing) so update the parent PF if possible.
5869 */
Shradha Shah910c8782015-05-20 11:12:48 +01005870 for (i = 0; i < efx_pf->vf_count; ++i) {
5871 struct ef10_vf *vf = nic_data->vf + i;
5872
5873 if (vf->efx == efx) {
5874 ether_addr_copy(vf->mac,
5875 efx->net_dev->dev_addr);
5876 return 0;
5877 }
5878 }
5879 }
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005880 } else
Shradha Shah910c8782015-05-20 11:12:48 +01005881#endif
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005882 if (rc == -EPERM) {
5883 netif_err(efx, drv, efx->net_dev,
5884 "Cannot change MAC address; use sfboot to enable"
5885 " mac-spoofing on this interface\n");
Daniel Pieczko7a186f42015-07-07 11:37:19 +01005886 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
5887 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
5888 * fall-back to the method of changing the MAC address on the
5889 * vport. This only applies to PFs because such versions of
5890 * MCFW do not support VFs.
5891 */
5892 rc = efx_ef10_vport_set_mac_address(efx);
Robert Stonehousecbad52e2017-11-07 17:30:30 +00005893 } else if (rc) {
Daniel Pieczko535a6172015-07-07 11:37:33 +01005894 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
5895 sizeof(inbuf), NULL, 0, rc);
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01005896 }
5897
Shradha Shah910c8782015-05-20 11:12:48 +01005898 return rc;
5899}
5900
Ben Hutchings8127d662013-08-29 19:19:29 +01005901static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
5902{
5903 efx_ef10_filter_sync_rx_mode(efx);
5904
5905 return efx_mcdi_set_mac(efx);
5906}
5907
Shradha Shah862f8942015-05-20 11:08:56 +01005908static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
5909{
5910 efx_ef10_filter_sync_rx_mode(efx);
5911
5912 return 0;
5913}
5914
Jon Cooper74cd60a2013-09-16 14:18:51 +01005915static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
5916{
5917 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
5918
5919 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
5920 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
5921 NULL, 0, NULL);
5922}
5923
5924/* MC BISTs follow a different poll mechanism to phy BISTs.
5925 * The BIST is done in the poll handler on the MC, and the MCDI command
5926 * will block until the BIST is done.
5927 */
5928static int efx_ef10_poll_bist(struct efx_nic *efx)
5929{
5930 int rc;
5931 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
5932 size_t outlen;
5933 u32 result;
5934
5935 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
5936 outbuf, sizeof(outbuf), &outlen);
5937 if (rc != 0)
5938 return rc;
5939
5940 if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
5941 return -EIO;
5942
5943 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
5944 switch (result) {
5945 case MC_CMD_POLL_BIST_PASSED:
5946 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
5947 return 0;
5948 case MC_CMD_POLL_BIST_TIMEOUT:
5949 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
5950 return -EIO;
5951 case MC_CMD_POLL_BIST_FAILED:
5952 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
5953 return -EIO;
5954 default:
5955 netif_err(efx, hw, efx->net_dev,
5956			  "BIST returned unknown result %u\n", result);
5957 return -EIO;
5958 }
5959}
5960
5961static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
5962{
5963 int rc;
5964
5965 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
5966
5967 rc = efx_ef10_start_bist(efx, bist_type);
5968 if (rc != 0)
5969 return rc;
5970
5971 return efx_ef10_poll_bist(efx);
5972}
5973
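/* Offline chip self-test: bring the datapath down around a WORLD reset,
 * switch the MC into offline BIST mode and run the memory and register
 * BISTs.  -EPERM is not reported as a failure.
 */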
5974static int
5975efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
5976{
5977 int rc, rc2;
5978
5979 efx_reset_down(efx, RESET_TYPE_WORLD);
5980
5981 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
5982 NULL, 0, NULL, 0, NULL);
5983 if (rc != 0)
5984 goto out;
5985
5986 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
5987 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
5988
5989 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
5990
5991out:
Daniel Pieczko27324822015-07-31 11:14:54 +01005992 if (rc == -EPERM)
5993 rc = 0;
Jon Cooper74cd60a2013-09-16 14:18:51 +01005994 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
5995 return rc ? rc : rc2;
5996}
5997
Ben Hutchings8127d662013-08-29 19:19:29 +01005998#ifdef CONFIG_SFC_MTD
5999
6000struct efx_ef10_nvram_type_info {
6001 u16 type, type_mask;
6002 u8 port;
6003 const char *name;
6004};
6005
6006static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
6007 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
6008 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
6009 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
6010 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
6011 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
6012 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
6013 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
6014 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
6015 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
Ben Hutchingsa84f3bf92013-10-09 14:14:41 +01006016 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
Ben Hutchings8127d662013-08-29 19:19:29 +01006017 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
6018};
6019
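/* Describe one NVRAM partition as an MTD partition: look the type up in
 * efx_ef10_nvram_types[], hide partitions that belong to another port or are
 * write-protected, then size the partition via NVRAM_INFO and fetch its
 * firmware subtype from NVRAM_METADATA.
 */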
6020static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6021 struct efx_mcdi_mtd_partition *part,
6022 unsigned int type)
6023{
6024 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
6025 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
6026 const struct efx_ef10_nvram_type_info *info;
6027 size_t size, erase_size, outlen;
6028 bool protected;
6029 int rc;
6030
6031 for (info = efx_ef10_nvram_types; ; info++) {
6032 if (info ==
6033 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
6034 return -ENODEV;
6035 if ((type & ~info->type_mask) == info->type)
6036 break;
6037 }
6038 if (info->port != efx_port_num(efx))
6039 return -ENODEV;
6040
6041 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
6042 if (rc)
6043 return rc;
6044 if (protected)
6045 return -ENODEV; /* hide it */
6046
6047 part->nvram_type = type;
6048
6049 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
6050 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
6051 outbuf, sizeof(outbuf), &outlen);
6052 if (rc)
6053 return rc;
6054 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
6055 return -EIO;
6056 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
6057 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
6058 part->fw_subtype = MCDI_DWORD(outbuf,
6059 NVRAM_METADATA_OUT_SUBTYPE);
6060
6061 part->common.dev_type_name = "EF10 NVRAM manager";
6062 part->common.type_name = info->name;
6063
6064 part->common.mtd.type = MTD_NORFLASH;
6065 part->common.mtd.flags = MTD_CAP_NORFLASH;
6066 part->common.mtd.size = size;
6067 part->common.mtd.erasesize = erase_size;
6068
6069 return 0;
6070}
6071
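/* Enumerate the NVRAM partitions reported by MC_CMD_NVRAM_PARTITIONS and
 * register an MTD device for each partition we recognise and may expose.
 */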
6072static int efx_ef10_mtd_probe(struct efx_nic *efx)
6073{
6074 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6075 struct efx_mcdi_mtd_partition *parts;
6076 size_t outlen, n_parts_total, i, n_parts;
6077 unsigned int type;
6078 int rc;
6079
6080 ASSERT_RTNL();
6081
6082 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
6083 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
6084 outbuf, sizeof(outbuf), &outlen);
6085 if (rc)
6086 return rc;
6087 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
6088 return -EIO;
6089
6090 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
6091 if (n_parts_total >
6092 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
6093 return -EIO;
6094
6095 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
6096 if (!parts)
6097 return -ENOMEM;
6098
6099 n_parts = 0;
6100 for (i = 0; i < n_parts_total; i++) {
6101 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
6102 i);
6103 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
6104 if (rc == 0)
6105 n_parts++;
6106 else if (rc != -ENODEV)
6107 goto fail;
6108 }
6109
6110 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
6111fail:
6112 if (rc)
6113 kfree(parts);
6114 return rc;
6115}
6116
6117#endif /* CONFIG_SFC_MTD */
6118
6119static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
6120{
6121 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
6122}
6123
Shradha Shah02246a72015-05-06 00:58:14 +01006124static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
6125 u32 host_time) {}
6126
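/* RX timestamping is enabled per channel by subscribing its event queue to
 * PTP time events (MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); the channel's
 * sync_events_state tracks whether the subscription is requested, valid,
 * disabled, or only temporarily quiesced.
 */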
Jon Cooperbd9a2652013-11-18 12:54:41 +00006127static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
6128 bool temp)
6129{
6130 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
6131 int rc;
6132
6133 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
6134 channel->sync_events_state == SYNC_EVENTS_VALID ||
6135 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
6136 return 0;
6137 channel->sync_events_state = SYNC_EVENTS_REQUESTED;
6138
6139 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
6140 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6141 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
6142 channel->channel);
6143
6144 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6145 inbuf, sizeof(inbuf), NULL, 0, NULL);
6146
6147 if (rc != 0)
6148 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6149 SYNC_EVENTS_DISABLED;
6150
6151 return rc;
6152}
6153
6154static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
6155 bool temp)
6156{
6157 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
6158 int rc;
6159
6160 if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
6161 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
6162 return 0;
6163 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
6164 channel->sync_events_state = SYNC_EVENTS_DISABLED;
6165 return 0;
6166 }
6167 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
6168 SYNC_EVENTS_DISABLED;
6169
6170 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
6171 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
6172 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
6173 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
6174 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
6175 channel->channel);
6176
6177 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
6178 inbuf, sizeof(inbuf), NULL, 0, NULL);
6179
6180 return rc;
6181}
6182
6183static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
6184 bool temp)
6185{
6186 int (*set)(struct efx_channel *channel, bool temp);
6187 struct efx_channel *channel;
6188
6189 set = en ?
6190 efx_ef10_rx_enable_timestamping :
6191 efx_ef10_rx_disable_timestamping;
6192
Edward Cree2935e3c2018-01-25 17:26:06 +00006193 channel = efx_ptp_channel(efx);
6194 if (channel) {
Jon Cooperbd9a2652013-11-18 12:54:41 +00006195 int rc = set(channel, temp);
6196 if (en && rc != 0) {
6197 efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
6198 return rc;
6199 }
6200 }
6201
6202 return 0;
6203}
6204
Shradha Shah02246a72015-05-06 00:58:14 +01006205static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
6206 struct hwtstamp_config *init)
6207{
6208 return -EOPNOTSUPP;
6209}
6210
Jon Cooperbd9a2652013-11-18 12:54:41 +00006211static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
6212 struct hwtstamp_config *init)
6213{
6214 int rc;
6215
6216 switch (init->rx_filter) {
6217 case HWTSTAMP_FILTER_NONE:
6218 efx_ef10_ptp_set_ts_sync_events(efx, false, false);
6219 /* if TX timestamping is still requested then leave PTP on */
6220 return efx_ptp_change_mode(efx,
6221 init->tx_type != HWTSTAMP_TX_OFF, 0);
6222 case HWTSTAMP_FILTER_ALL:
6223 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6224 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
6225 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
6226 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6227 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
6228 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
6229 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6230 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6231 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6232 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6233 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6234 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Miroslav Lichvare3412572017-05-19 17:52:36 +02006235 case HWTSTAMP_FILTER_NTP_ALL:
Jon Cooperbd9a2652013-11-18 12:54:41 +00006236 init->rx_filter = HWTSTAMP_FILTER_ALL;
6237 rc = efx_ptp_change_mode(efx, true, 0);
6238 if (!rc)
6239 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
6240 if (rc)
6241 efx_ptp_change_mode(efx, false, 0);
6242 return rc;
6243 default:
6244 return -ERANGE;
6245 }
6246}
6247
Bert Kenward08a7b29b2017-01-10 16:23:33 +00006248static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
6249 struct netdev_phys_item_id *ppid)
6250{
6251 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6252
6253 if (!is_valid_ether_addr(nic_data->port_id))
6254 return -EOPNOTSUPP;
6255
6256 ppid->id_len = ETH_ALEN;
6257 memcpy(ppid->id, nic_data->port_id, ppid->id_len);
6258
6259 return 0;
6260}
6261
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006262static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6263{
6264 if (proto != htons(ETH_P_8021Q))
6265 return -EINVAL;
6266
6267 return efx_ef10_add_vlan(efx, vid);
6268}
6269
6270static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
6271{
6272 if (proto != htons(ETH_P_8021Q))
6273 return -EINVAL;
6274
6275 return efx_ef10_del_vlan(efx, vid);
6276}
6277
Jon Coopere5fbd972017-02-08 16:52:10 +00006278/* We rely on the MCDI wiping out our TX rings if it made any changes to the
6279 * ports table, ensuring that any TSO descriptors that were made on a now-
6280 * removed tunnel port will be blown away and won't break things when we try
6281 * to transmit them using the new ports table.
6282 */
6283static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
6284{
6285 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6286 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
6287 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
6288 bool will_reset = false;
6289 size_t num_entries = 0;
6290 size_t inlen, outlen;
6291 size_t i;
6292 int rc;
6293 efx_dword_t flags_and_num_entries;
6294
6295 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
6296
6297 nic_data->udp_tunnels_dirty = false;
6298
6299 if (!(nic_data->datapath_caps &
6300 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
Peter Dunning9c568fd2017-02-17 15:50:43 +00006301 efx_device_attach_if_not_resetting(efx);
Jon Coopere5fbd972017-02-08 16:52:10 +00006302 return 0;
6303 }
6304
6305 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
6306 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
6307
6308 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6309 if (nic_data->udp_tunnels[i].count &&
6310 nic_data->udp_tunnels[i].port) {
6311 efx_dword_t entry;
6312
6313 EFX_POPULATE_DWORD_2(entry,
6314 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
6315 ntohs(nic_data->udp_tunnels[i].port),
6316 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
6317 nic_data->udp_tunnels[i].type);
6318 *_MCDI_ARRAY_DWORD(inbuf,
6319 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
6320 num_entries++) = entry;
6321 }
6322 }
6323
6324 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
6325 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
6326 EFX_WORD_1_LBN);
6327 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
6328 EFX_WORD_1_WIDTH);
6329 EFX_POPULATE_DWORD_2(flags_and_num_entries,
6330 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
6331 !!unloading,
6332 EFX_WORD_1, num_entries);
6333 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
6334 flags_and_num_entries;
6335
6336 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
6337
6338 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
6339 inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
6340 if (rc == -EIO) {
6341 /* Most likely the MC rebooted due to another function also
6342 * setting its tunnel port list. Mark the tunnel port list as
6343 * dirty, so it will be pushed upon coming up from the reboot.
6344 */
6345 nic_data->udp_tunnels_dirty = true;
6346 return 0;
6347 }
6348
6349 if (rc) {
6350		/* -EPERM is expected on unprivileged functions */
6351 if (rc != -EPERM)
6352 netif_warn(efx, drv, efx->net_dev,
6353 "Unable to set UDP tunnel ports; rc=%d.\n", rc);
6354 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
6355 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
6356 netif_info(efx, drv, efx->net_dev,
6357 "Rebooting MC due to UDP tunnel port list change\n");
6358 will_reset = true;
6359 if (unloading)
6360 /* Delay for the MC reset to complete. This will make
6361 * unloading other functions a bit smoother. This is a
6362 * race, but the other unload will work whichever way
6363			 * it goes; this just avoids an unnecessary error
6364 * message.
6365 */
6366 msleep(100);
6367 }
6368 if (!will_reset && !unloading) {
6369 /* The caller will have detached, relying on the MC reset to
6370 * trigger a re-attach. Since there won't be an MC reset, we
6371 * have to do the attach ourselves.
6372 */
Peter Dunning9c568fd2017-02-17 15:50:43 +00006373 efx_device_attach_if_not_resetting(efx);
Jon Coopere5fbd972017-02-08 16:52:10 +00006374 }
6375
6376 return rc;
6377}
6378
6379static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
6380{
6381 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6382 int rc = 0;
6383
6384 mutex_lock(&nic_data->udp_tunnels_lock);
6385 if (nic_data->udp_tunnels_dirty) {
6386 /* Make sure all TX are stopped while we modify the table, else
6387 * we might race against an efx_features_check().
6388 */
6389 efx_device_detach_sync(efx);
6390 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6391 }
6392 mutex_unlock(&nic_data->udp_tunnels_lock);
6393 return rc;
6394}
6395
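/* Linear scan of the software UDP tunnel table for an active entry matching
 * the given port; returns NULL if the port is not currently offloaded.
 */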
6396static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
6397 __be16 port)
6398{
6399 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6400 size_t i;
6401
6402 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
6403 if (!nic_data->udp_tunnels[i].count)
6404 continue;
6405 if (nic_data->udp_tunnels[i].port == port)
6406 return &nic_data->udp_tunnels[i];
6407 }
6408 return NULL;
6409}
6410
6411static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
6412 struct efx_udp_tunnel tnl)
6413{
6414 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6415 struct efx_udp_tunnel *match;
6416 char typebuf[8];
6417 size_t i;
6418 int rc;
6419
6420 if (!(nic_data->datapath_caps &
6421 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6422 return 0;
6423
6424 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6425 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
6426 typebuf, ntohs(tnl.port));
6427
6428 mutex_lock(&nic_data->udp_tunnels_lock);
6429 /* Make sure all TX are stopped while we add to the table, else we
6430 * might race against an efx_features_check().
6431 */
6432 efx_device_detach_sync(efx);
6433
6434 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6435 if (match != NULL) {
6436 if (match->type == tnl.type) {
6437 netif_dbg(efx, drv, efx->net_dev,
6438 "Referencing existing tunnel entry\n");
6439 match->count++;
6440 /* No need to cause an MCDI update */
6441 rc = 0;
6442 goto unlock_out;
6443 }
6444 efx_get_udp_tunnel_type_name(match->type,
6445 typebuf, sizeof(typebuf));
6446 netif_dbg(efx, drv, efx->net_dev,
6447 "UDP port %d is already in use by %s\n",
6448 ntohs(tnl.port), typebuf);
6449 rc = -EEXIST;
6450 goto unlock_out;
6451 }
6452
6453 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
6454 if (!nic_data->udp_tunnels[i].count) {
6455 nic_data->udp_tunnels[i] = tnl;
6456 nic_data->udp_tunnels[i].count = 1;
6457 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6458 goto unlock_out;
6459 }
6460
6461 netif_dbg(efx, drv, efx->net_dev,
6462 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
6463 typebuf, ntohs(tnl.port));
6464
6465 rc = -ENOMEM;
6466
6467unlock_out:
6468 mutex_unlock(&nic_data->udp_tunnels_lock);
6469 return rc;
6470}
6471
6472/* Called under the TX lock with the TX queue running, hence no-one can be
6473 * in the middle of updating the UDP tunnels table. However, they could
6474 * have tried and failed the MCDI, in which case they'll have set the dirty
6475 * flag before dropping their locks.
6476 */
6477static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
6478{
6479 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6480
6481 if (!(nic_data->datapath_caps &
6482 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6483 return false;
6484
6485 if (nic_data->udp_tunnels_dirty)
6486 /* SW table may not match HW state, so just assume we can't
6487 * use any UDP tunnel offloads.
6488 */
6489 return false;
6490
6491 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
6492}
6493
6494static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
6495 struct efx_udp_tunnel tnl)
6496{
6497 struct efx_ef10_nic_data *nic_data = efx->nic_data;
6498 struct efx_udp_tunnel *match;
6499 char typebuf[8];
6500 int rc;
6501
6502 if (!(nic_data->datapath_caps &
6503 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
6504 return 0;
6505
6506 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
6507 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
6508 typebuf, ntohs(tnl.port));
6509
6510 mutex_lock(&nic_data->udp_tunnels_lock);
6511 /* Make sure all TX are stopped while we remove from the table, else we
6512 * might race against an efx_features_check().
6513 */
6514 efx_device_detach_sync(efx);
6515
6516 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
6517 if (match != NULL) {
6518 if (match->type == tnl.type) {
6519 if (--match->count) {
6520 /* Port is still in use, so nothing to do */
6521 netif_dbg(efx, drv, efx->net_dev,
6522 "UDP tunnel port %d remains active\n",
6523 ntohs(tnl.port));
6524 rc = 0;
6525 goto out_unlock;
6526 }
6527 rc = efx_ef10_set_udp_tnl_ports(efx, false);
6528 goto out_unlock;
6529 }
6530 efx_get_udp_tunnel_type_name(match->type,
6531 typebuf, sizeof(typebuf));
6532 netif_warn(efx, drv, efx->net_dev,
6533 "UDP port %d is actually in use by %s, not removing\n",
6534 ntohs(tnl.port), typebuf);
6535 }
6536 rc = -ENOENT;
6537
6538out_unlock:
6539 mutex_unlock(&nic_data->udp_tunnels_lock);
6540 return rc;
6541}
6542
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006543#define EF10_OFFLOAD_FEATURES \
6544 (NETIF_F_IP_CSUM | \
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006545 NETIF_F_HW_VLAN_CTAG_FILTER | \
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006546 NETIF_F_IPV6_CSUM | \
6547 NETIF_F_RXHASH | \
6548 NETIF_F_NTUPLE)
6549
Shradha Shah02246a72015-05-06 00:58:14 +01006550const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
Shradha Shah6f7f8aa2015-05-06 01:00:07 +01006551 .is_vf = true,
Edward Cree03714bb2017-12-18 16:55:50 +00006552 .mem_bar = efx_ef10_vf_mem_bar,
Ben Hutchings8127d662013-08-29 19:19:29 +01006553 .mem_map_size = efx_ef10_mem_map_size,
Shradha Shah02246a72015-05-06 00:58:14 +01006554 .probe = efx_ef10_probe_vf,
6555 .remove = efx_ef10_remove,
6556 .dimension_resources = efx_ef10_dimension_resources,
6557 .init = efx_ef10_init_nic,
6558 .fini = efx_port_dummy_op_void,
Jon Cooper087e9022015-05-20 11:11:35 +01006559 .map_reset_reason = efx_ef10_map_reset_reason,
Shradha Shah02246a72015-05-06 00:58:14 +01006560 .map_reset_flags = efx_ef10_map_reset_flags,
6561 .reset = efx_ef10_reset,
6562 .probe_port = efx_mcdi_port_probe,
6563 .remove_port = efx_mcdi_port_remove,
6564 .fini_dmaq = efx_ef10_fini_dmaq,
6565 .prepare_flr = efx_ef10_prepare_flr,
6566 .finish_flr = efx_port_dummy_op_void,
6567 .describe_stats = efx_ef10_describe_stats,
Daniel Pieczkod7788192015-06-02 11:39:20 +01006568 .update_stats = efx_ef10_update_stats_vf,
Shradha Shah02246a72015-05-06 00:58:14 +01006569 .start_stats = efx_port_dummy_op_void,
6570 .pull_stats = efx_port_dummy_op_void,
6571 .stop_stats = efx_port_dummy_op_void,
6572 .set_id_led = efx_mcdi_set_id_led,
6573 .push_irq_moderation = efx_ef10_push_irq_moderation,
Shradha Shah862f8942015-05-20 11:08:56 +01006574 .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
Shradha Shah02246a72015-05-06 00:58:14 +01006575 .check_mac_fault = efx_mcdi_mac_check_fault,
6576 .reconfigure_port = efx_mcdi_port_reconfigure,
6577 .get_wol = efx_ef10_get_wol_vf,
6578 .set_wol = efx_ef10_set_wol_vf,
6579 .resume_wol = efx_port_dummy_op_void,
6580 .mcdi_request = efx_ef10_mcdi_request,
6581 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
6582 .mcdi_read_response = efx_ef10_mcdi_read_response,
6583 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
Daniel Pieczkoc577e592015-10-09 10:40:35 +01006584 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
Shradha Shah02246a72015-05-06 00:58:14 +01006585 .irq_enable_master = efx_port_dummy_op_void,
6586 .irq_test_generate = efx_ef10_irq_test_generate,
6587 .irq_disable_non_ev = efx_port_dummy_op_void,
6588 .irq_handle_msi = efx_ef10_msi_interrupt,
6589 .irq_handle_legacy = efx_ef10_legacy_interrupt,
6590 .tx_probe = efx_ef10_tx_probe,
6591 .tx_init = efx_ef10_tx_init,
6592 .tx_remove = efx_ef10_tx_remove,
6593 .tx_write = efx_ef10_tx_write,
Bert Kenwarde9117e52016-11-17 10:51:54 +00006594 .tx_limit_len = efx_ef10_tx_limit_len,
Jon Cooper267c0152015-05-06 00:59:38 +01006595 .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
Edward Creea707d182017-01-17 12:02:12 +00006596 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
Shradha Shah02246a72015-05-06 00:58:14 +01006597 .rx_probe = efx_ef10_rx_probe,
6598 .rx_init = efx_ef10_rx_init,
6599 .rx_remove = efx_ef10_rx_remove,
6600 .rx_write = efx_ef10_rx_write,
6601 .rx_defer_refill = efx_ef10_rx_defer_refill,
6602 .ev_probe = efx_ef10_ev_probe,
6603 .ev_init = efx_ef10_ev_init,
6604 .ev_fini = efx_ef10_ev_fini,
6605 .ev_remove = efx_ef10_ev_remove,
6606 .ev_process = efx_ef10_ev_process,
6607 .ev_read_ack = efx_ef10_ev_read_ack,
6608 .ev_test_generate = efx_ef10_ev_test_generate,
6609 .filter_table_probe = efx_ef10_filter_table_probe,
6610 .filter_table_restore = efx_ef10_filter_table_restore,
6611 .filter_table_remove = efx_ef10_filter_table_remove,
6612 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6613 .filter_insert = efx_ef10_filter_insert,
6614 .filter_remove_safe = efx_ef10_filter_remove_safe,
6615 .filter_get_safe = efx_ef10_filter_get_safe,
6616 .filter_clear_rx = efx_ef10_filter_clear_rx,
6617 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
6618 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6619 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6620#ifdef CONFIG_RFS_ACCEL
Shradha Shah02246a72015-05-06 00:58:14 +01006621 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6622#endif
6623#ifdef CONFIG_SFC_MTD
6624 .mtd_probe = efx_port_dummy_op_int,
6625#endif
6626 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
6627 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006628 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6629 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
Shradha Shah02246a72015-05-06 00:58:14 +01006630#ifdef CONFIG_SFC_SRIOV
Shradha Shah7b8c7b52015-05-06 00:58:54 +01006631 .vswitching_probe = efx_ef10_vswitching_probe_vf,
6632 .vswitching_restore = efx_ef10_vswitching_restore_vf,
6633 .vswitching_remove = efx_ef10_vswitching_remove_vf,
Shradha Shah02246a72015-05-06 00:58:14 +01006634#endif
Daniel Pieczko0d5e0fb2015-05-20 11:10:20 +01006635 .get_mac_address = efx_ef10_get_mac_address_vf,
Shradha Shah910c8782015-05-20 11:12:48 +01006636 .set_mac_address = efx_ef10_set_mac_address,
Daniel Pieczko0d5e0fb2015-05-20 11:10:20 +01006637
Bert Kenward08a7b29b2017-01-10 16:23:33 +00006638 .get_phys_port_id = efx_ef10_get_phys_port_id,
Shradha Shah02246a72015-05-06 00:58:14 +01006639 .revision = EFX_REV_HUNT_A0,
6640 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6641 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6642 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
6643 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
6644 .can_rx_scatter = true,
6645 .always_rx_scatter = true,
Andrew Rybchenko6f9f6ec2017-02-13 14:57:39 +00006646 .min_interrupt_mode = EFX_INT_MODE_MSIX,
Shradha Shah02246a72015-05-06 00:58:14 +01006647 .max_interrupt_mode = EFX_INT_MODE_MSIX,
6648 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006649 .offload_features = EF10_OFFLOAD_FEATURES,
Shradha Shah02246a72015-05-06 00:58:14 +01006650 .mcdi_max_ver = 2,
6651 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
6652 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6653 1 << HWTSTAMP_FILTER_ALL,
Edward Creef74d1992017-01-17 12:01:53 +00006654 .rx_hash_key_size = 40,
Shradha Shah02246a72015-05-06 00:58:14 +01006655};
6656
6657const struct efx_nic_type efx_hunt_a0_nic_type = {
Shradha Shah6f7f8aa2015-05-06 01:00:07 +01006658 .is_vf = false,
Edward Cree03714bb2017-12-18 16:55:50 +00006659 .mem_bar = efx_ef10_pf_mem_bar,
Shradha Shah02246a72015-05-06 00:58:14 +01006660 .mem_map_size = efx_ef10_mem_map_size,
6661 .probe = efx_ef10_probe_pf,
Ben Hutchings8127d662013-08-29 19:19:29 +01006662 .remove = efx_ef10_remove,
6663 .dimension_resources = efx_ef10_dimension_resources,
6664 .init = efx_ef10_init_nic,
6665 .fini = efx_port_dummy_op_void,
Jon Cooper087e9022015-05-20 11:11:35 +01006666 .map_reset_reason = efx_ef10_map_reset_reason,
Ben Hutchings8127d662013-08-29 19:19:29 +01006667 .map_reset_flags = efx_ef10_map_reset_flags,
Jon Cooper3e336262014-01-17 19:48:06 +00006668 .reset = efx_ef10_reset,
Ben Hutchings8127d662013-08-29 19:19:29 +01006669 .probe_port = efx_mcdi_port_probe,
6670 .remove_port = efx_mcdi_port_remove,
6671 .fini_dmaq = efx_ef10_fini_dmaq,
Edward Creee2835462014-04-16 19:27:48 +01006672 .prepare_flr = efx_ef10_prepare_flr,
6673 .finish_flr = efx_port_dummy_op_void,
Ben Hutchings8127d662013-08-29 19:19:29 +01006674 .describe_stats = efx_ef10_describe_stats,
Daniel Pieczkod7788192015-06-02 11:39:20 +01006675 .update_stats = efx_ef10_update_stats_pf,
Ben Hutchings8127d662013-08-29 19:19:29 +01006676 .start_stats = efx_mcdi_mac_start_stats,
Jon Cooperf8f3b5a2013-09-30 17:36:50 +01006677 .pull_stats = efx_mcdi_mac_pull_stats,
Ben Hutchings8127d662013-08-29 19:19:29 +01006678 .stop_stats = efx_mcdi_mac_stop_stats,
6679 .set_id_led = efx_mcdi_set_id_led,
6680 .push_irq_moderation = efx_ef10_push_irq_moderation,
6681 .reconfigure_mac = efx_ef10_mac_reconfigure,
6682 .check_mac_fault = efx_mcdi_mac_check_fault,
6683 .reconfigure_port = efx_mcdi_port_reconfigure,
6684 .get_wol = efx_ef10_get_wol,
6685 .set_wol = efx_ef10_set_wol,
6686 .resume_wol = efx_port_dummy_op_void,
Jon Cooper74cd60a2013-09-16 14:18:51 +01006687 .test_chip = efx_ef10_test_chip,
Ben Hutchings8127d662013-08-29 19:19:29 +01006688 .test_nvram = efx_mcdi_nvram_test_all,
6689 .mcdi_request = efx_ef10_mcdi_request,
6690 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
6691 .mcdi_read_response = efx_ef10_mcdi_read_response,
6692 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
Daniel Pieczkoc577e592015-10-09 10:40:35 +01006693 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
Ben Hutchings8127d662013-08-29 19:19:29 +01006694 .irq_enable_master = efx_port_dummy_op_void,
6695 .irq_test_generate = efx_ef10_irq_test_generate,
6696 .irq_disable_non_ev = efx_port_dummy_op_void,
6697 .irq_handle_msi = efx_ef10_msi_interrupt,
6698 .irq_handle_legacy = efx_ef10_legacy_interrupt,
6699 .tx_probe = efx_ef10_tx_probe,
6700 .tx_init = efx_ef10_tx_init,
6701 .tx_remove = efx_ef10_tx_remove,
6702 .tx_write = efx_ef10_tx_write,
Bert Kenwarde9117e52016-11-17 10:51:54 +00006703 .tx_limit_len = efx_ef10_tx_limit_len,
Jon Cooper267c0152015-05-06 00:59:38 +01006704 .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
Edward Creea707d182017-01-17 12:02:12 +00006705 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
Edward Cree42356d92018-03-08 15:45:17 +00006706 .rx_push_rss_context_config = efx_ef10_rx_push_rss_context_config,
6707 .rx_pull_rss_context_config = efx_ef10_rx_pull_rss_context_config,
6708 .rx_restore_rss_contexts = efx_ef10_rx_restore_rss_contexts,
Ben Hutchings8127d662013-08-29 19:19:29 +01006709 .rx_probe = efx_ef10_rx_probe,
6710 .rx_init = efx_ef10_rx_init,
6711 .rx_remove = efx_ef10_rx_remove,
6712 .rx_write = efx_ef10_rx_write,
6713 .rx_defer_refill = efx_ef10_rx_defer_refill,
6714 .ev_probe = efx_ef10_ev_probe,
6715 .ev_init = efx_ef10_ev_init,
6716 .ev_fini = efx_ef10_ev_fini,
6717 .ev_remove = efx_ef10_ev_remove,
6718 .ev_process = efx_ef10_ev_process,
6719 .ev_read_ack = efx_ef10_ev_read_ack,
6720 .ev_test_generate = efx_ef10_ev_test_generate,
6721 .filter_table_probe = efx_ef10_filter_table_probe,
6722 .filter_table_restore = efx_ef10_filter_table_restore,
6723 .filter_table_remove = efx_ef10_filter_table_remove,
6724 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
6725 .filter_insert = efx_ef10_filter_insert,
6726 .filter_remove_safe = efx_ef10_filter_remove_safe,
6727 .filter_get_safe = efx_ef10_filter_get_safe,
6728 .filter_clear_rx = efx_ef10_filter_clear_rx,
6729 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
6730 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
6731 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
6732#ifdef CONFIG_RFS_ACCEL
Ben Hutchings8127d662013-08-29 19:19:29 +01006733 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
6734#endif
6735#ifdef CONFIG_SFC_MTD
6736 .mtd_probe = efx_ef10_mtd_probe,
6737 .mtd_rename = efx_mcdi_mtd_rename,
6738 .mtd_read = efx_mcdi_mtd_read,
6739 .mtd_erase = efx_mcdi_mtd_erase,
6740 .mtd_write = efx_mcdi_mtd_write,
6741 .mtd_sync = efx_mcdi_mtd_sync,
6742#endif
6743 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
Jon Cooperbd9a2652013-11-18 12:54:41 +00006744 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
6745 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
Andrew Rybchenko4a53ea82016-06-15 17:48:32 +01006746 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
6747 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
Jon Coopere5fbd972017-02-08 16:52:10 +00006748 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
6749 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
6750 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
6751 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
Shradha Shah7fa8d542015-05-06 00:55:13 +01006752#ifdef CONFIG_SFC_SRIOV
Shradha Shah834e23d2015-05-06 00:55:58 +01006753 .sriov_configure = efx_ef10_sriov_configure,
Shradha Shahd98a4ff2014-11-05 12:16:46 +00006754 .sriov_init = efx_ef10_sriov_init,
6755 .sriov_fini = efx_ef10_sriov_fini,
Shradha Shahd98a4ff2014-11-05 12:16:46 +00006756 .sriov_wanted = efx_ef10_sriov_wanted,
6757 .sriov_reset = efx_ef10_sriov_reset,
Shradha Shah7fa8d542015-05-06 00:55:13 +01006758 .sriov_flr = efx_ef10_sriov_flr,
6759 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
6760 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
6761 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
6762 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
Edward Cree4392dc62015-05-20 11:12:13 +01006763 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
Shradha Shah7b8c7b52015-05-06 00:58:54 +01006764 .vswitching_probe = efx_ef10_vswitching_probe_pf,
6765 .vswitching_restore = efx_ef10_vswitching_restore_pf,
6766 .vswitching_remove = efx_ef10_vswitching_remove_pf,
Shradha Shah7fa8d542015-05-06 00:55:13 +01006767#endif
Daniel Pieczko0d5e0fb2015-05-20 11:10:20 +01006768 .get_mac_address = efx_ef10_get_mac_address_pf,
Shradha Shah910c8782015-05-20 11:12:48 +01006769 .set_mac_address = efx_ef10_set_mac_address,
Edward Cree46d1efd2016-11-17 10:52:36 +00006770 .tso_versions = efx_ef10_tso_versions,
Ben Hutchings8127d662013-08-29 19:19:29 +01006771
Bert Kenward08a7b29b2017-01-10 16:23:33 +00006772 .get_phys_port_id = efx_ef10_get_phys_port_id,
Ben Hutchings8127d662013-08-29 19:19:29 +01006773 .revision = EFX_REV_HUNT_A0,
6774 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
6775 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
6776 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
Jon Cooperbd9a2652013-11-18 12:54:41 +00006777 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
Ben Hutchings8127d662013-08-29 19:19:29 +01006778 .can_rx_scatter = true,
6779 .always_rx_scatter = true,
Edward Creede1deff2017-01-13 21:20:14 +00006780 .option_descriptors = true,
Andrew Rybchenko6f9f6ec2017-02-13 14:57:39 +00006781 .min_interrupt_mode = EFX_INT_MODE_LEGACY,
Ben Hutchings8127d662013-08-29 19:19:29 +01006782 .max_interrupt_mode = EFX_INT_MODE_MSIX,
6783 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
Andrew Rybchenko100a9db2016-06-15 17:42:26 +01006784 .offload_features = EF10_OFFLOAD_FEATURES,
Ben Hutchings8127d662013-08-29 19:19:29 +01006785 .mcdi_max_ver = 2,
6786 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
Jon Cooperbd9a2652013-11-18 12:54:41 +00006787 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
6788 1 << HWTSTAMP_FILTER_ALL,
Edward Creef74d1992017-01-17 12:01:53 +00006789 .rx_hash_key_size = 40,
Ben Hutchings8127d662013-08-29 19:19:29 +01006790};