/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff
struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
	u16 id;
};

struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
	enum efx_filter_match_flags rx_match_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
 * used to mark and sweep MAC filters for the device address lists.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
	u16 ucdef_id;
	u16 bcast_id;
	u16 mcdef_id;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);

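/* Read the MC warm boot count from the BIU soft status register.  Word 1
 * holds the magic value 0xb007 when word 0 contains a valid boot count;
 * otherwise -EIO is returned.
 */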
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	int bar;

	bar = efx->type->mem_bar;
	return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
	return efx->type->is_vf;
}

static int efx_ef10_get_pf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	return 0;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
			  sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}
#endif

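/* Read the datapath firmware capability flags via MC_CMD_GET_CAPABILITIES
 * and cache them, along with the RX and TX DPCPU firmware IDs, in the
 * per-NIC data.
 */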
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf)) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	/* record the DPCPU firmware IDs to determine VEB vswitching support.
	 */
	nic_data->rx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
	nic_data->tx_dpcpu_fw_id =
		MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

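/* Get this function's permanent MAC address.  The PF variant reads the
 * port's base MAC address; the VF variant queries its own v-port, which
 * is expected to carry exactly one address.
 */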
static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	size_t outlen;
	int num_addrs, rc;

	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

	if (rc)
		return rc;
	if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
		return -EIO;

	num_addrs = MCDI_DWORD(outbuf,
			       VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

	WARN_ON(num_addrs != 1);

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

	return 0;
}

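/* sysfs attributes reporting whether this function controls the link and
 * whether it is the primary function, taken from the MCDI function flags.
 */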
static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

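/* Core probe for an EF10 function: size the VI space, allocate the MCDI
 * buffer, bring up MCDI, reset the function's configuration, then read
 * back capabilities, port number, MAC address, system clock frequency
 * and workaround state.
 */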
static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	struct net_device *net_dev = efx->net_dev;
	int i, rc;

	/* We can have one VI for each 8K region.  However, until we
	 * use TX option descriptors we need two TX queues per channel.
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;
	net_dev->dev_port = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail5;
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */

	/* Check whether firmware supports bug 35388 workaround.
	 * First try to enable it, then if we get EPERM, just
	 * ask if it's already enabled
	 */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
	if (rc == 0) {
		nic_data->workaround_35388 = true;
	} else if (rc == -EPERM) {
		unsigned int enabled;

		rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
		if (rc)
			goto fail3;
		nic_data->workaround_35388 = enabled &
			MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
	} else if (rc != -ENOSYS && rc != -ENOENT) {
		goto fail5;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	efx_ptp_probe(efx, NULL);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	return 0;

fail5:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF_ERR(outbuf);
	size_t outlen;
	int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
				    outbuf, sizeof(outbuf), &outlen);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
				       rc);
	return rc;
}

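/* PIO buffer management.  PIO buffers are written through the
 * write-combining mapping and must be allocated, linked to TX queue
 * instances and freed again through MCDI.
 */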
#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
	unsigned int i;
	int rc;

	BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

	for (i = 0; i < nic_data->n_piobufs; i++) {
		MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[i]);
		rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
				  NULL, 0, NULL);
		WARN_ON(rc);
	}

	nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	unsigned int i;
	size_t outlen;
	int rc = 0;

	BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

	for (i = 0; i < n; i++) {
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
					outbuf, sizeof(outbuf), &outlen);
		if (rc) {
			/* Don't display the MC error if we didn't have space
			 * for a VF.
			 */
			if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
				efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
						       0, outbuf, outlen, rc);
			break;
		}
		if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
			rc = -EIO;
			break;
		}
		nic_data->piobuf_handle[i] =
			MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
		netif_dbg(efx, probe, efx->net_dev,
			  "allocated PIO buffer %u handle %x\n", i,
			  nic_data->piobuf_handle[i]);
	}

	nic_data->n_piobufs = i;
	if (rc)
		efx_ef10_free_piobufs(efx);
	return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	_MCDI_DECLARE_BUF(inbuf,
			  max(MC_CMD_LINK_PIOBUF_IN_LEN,
			      MC_CMD_UNLINK_PIOBUF_IN_LEN));
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	memset(inbuf, 0, sizeof(inbuf));

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / ER_DZ_TX_PIOBUF_SIZE;
			offset = offset % ER_DZ_TX_PIOBUF_SIZE;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue.  We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * EFX_VI_PAGE_SIZE + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

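/* Tear down the state set up by efx_ef10_probe(), in reverse order.  On
 * a VF this also clears the back-pointer held by the parent PF.
 */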
static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}
#endif

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_ef10_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

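/* MCDI wrappers for v-adaptor allocation and v-port MAC address
 * management.  These are non-static since they are shared with the
 * SR-IOV code (see ef10_sriov.h, included above).
 */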
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe.  The VF needs to be re-created.  This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

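/* Request between min_vis and max_vis virtual interfaces from the MC
 * and record the base VI number and count actually granted.
 */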
static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	unsigned int min_vis = max(EFX_TXQ_TYPES,
				   efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel.  Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K).  So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first half of the next VI.  Then the WC mapping begins with
	 * the second half of this last VI.
	 */
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      EFX_VI_PAGE_SIZE) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_ef10_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / EFX_TXQ_TYPES;

		efx_ef10_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

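/* Restore any state that the MC forgot across a reboot: the datapath
 * capability flags, the VI allocation (which must not change size), the
 * PIO buffers and the RSS configuration.
 */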
static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal */
		if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* don't fail init if RSS setup doesn't work */
	efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);

	return 0;
}

static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

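/* Map an MC failure onto a datapath-only reset; everything else uses the
 * common MCDI mapping.
 */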
static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_reset_mc_allocations(efx);
	return rc;
}

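/* Statistics descriptors.  EF10_DMA_STAT entries are DMAed from the MC
 * and named; EF10_DMA_INVIS_STAT entries are DMAed but have no name, so
 * they are not exposed to ethtool; EF10_OTHER_STAT and GENERIC_SW_STAT
 * entries are maintained in software.
 */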
#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
			       (1ULL << EF10_STAT_port_tx_packets) |	\
			       (1ULL << EF10_STAT_port_tx_pause) |	\
			       (1ULL << EF10_STAT_port_tx_unicast) |	\
			       (1ULL << EF10_STAT_port_tx_multicast) |	\
			       (1ULL << EF10_STAT_port_tx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_bytes) |	\
			       (1ULL <<					\
				EF10_STAT_port_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_port_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_port_rx_packets) |	\
			       (1ULL << EF10_STAT_port_rx_good) |	\
			       (1ULL << EF10_STAT_port_rx_bad) |	\
			       (1ULL << EF10_STAT_port_rx_pause) |	\
			       (1ULL << EF10_STAT_port_rx_control) |	\
			       (1ULL << EF10_STAT_port_rx_unicast) |	\
			       (1ULL << EF10_STAT_port_rx_multicast) |	\
			       (1ULL << EF10_STAT_port_rx_broadcast) |	\
			       (1ULL << EF10_STAT_port_rx_lt64) |	\
			       (1ULL << EF10_STAT_port_rx_64) |		\
			       (1ULL << EF10_STAT_port_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_port_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_port_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_port_rx_512_to_1023) |\
			       (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
			       (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
			       (1ULL << EF10_STAT_port_rx_gtjumbo) |	\
			       (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
			       (1ULL << EF10_STAT_port_rx_overflow) |	\
			       (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
			       (1ULL << GENERIC_STAT_rx_nodesc_trunc) |	\
			       (1ULL << GENERIC_STAT_rx_noskb_drops))

/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |	\
				 (1ULL << EF10_STAT_port_tx_lt64) |	\
				 (1ULL << EF10_STAT_port_tx_64) |	\
				 (1ULL << EF10_STAT_port_tx_65_to_127) |\
				 (1ULL << EF10_STAT_port_tx_128_to_255) |\
				 (1ULL << EF10_STAT_port_tx_256_to_511) |\
				 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
				 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
				 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * be silent.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
				  (1ULL << EF10_STAT_port_rx_length_error))

/* These statistics are only provided if the firmware supports the
 * capability PM_AND_RXDP_COUNTERS.
 */
#define HUNT_PM_AND_RXDP_STAT_MASK (					\
	(1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |		\
	(1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_qbb) |			\
	(1ULL << EF10_STAT_port_rx_pm_discard_mapping) |		\
	(1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_streaming_packets) |		\
	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))

static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
	u64 raw_mask = HUNT_COMMON_STAT_MASK;
	u32 port_caps = efx_mcdi_phy_get_caps(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (!(efx->mcdi->fn_flags &
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		return 0;

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
	else
		raw_mask |= HUNT_10G_ONLY_STAT_MASK;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;

	return raw_mask;
}

static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 raw_mask[2];

	raw_mask[0] = efx_ef10_raw_stat_mask(efx);

	/* Only show vadaptor stats when EVB capability is present */
	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
	} else {
		raw_mask[1] = 0;
	}

#if BITS_PER_LONG == 64
	mask[0] = raw_mask[0];
	mask[1] = raw_mask[1];
#else
	mask[0] = raw_mask[0] & 0xffffffff;
	mask[1] = raw_mask[0] >> 32;
	mask[2] = raw_mask[1] & 0xffffffff;
	mask[3] = raw_mask[1] >> 32;
#endif
}

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);

	efx_ef10_get_stat_mask(efx, mask);
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      mask, names);
}

static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					   struct rtnl_link_stats64 *core_stats)
{
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;

	efx_ef10_get_stat_mask(efx, mask);

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (!core_stats)
		return stats_count;

	if (nic_data->datapath_caps &
			1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
		/* Use vadaptor stats. */
		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
					 stats[EF10_STAT_rx_multicast] +
					 stats[EF10_STAT_rx_broadcast];
		core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
					 stats[EF10_STAT_tx_multicast] +
					 stats[EF10_STAT_tx_broadcast];
		core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
				       stats[EF10_STAT_rx_multicast_bytes] +
				       stats[EF10_STAT_rx_broadcast_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
				       stats[EF10_STAT_tx_multicast_bytes] +
				       stats[EF10_STAT_tx_broadcast_bytes];
		core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = core_stats->rx_crc_errors;
		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
	} else {
		/* Use port stats. */
		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
		core_stats->rx_length_errors =
				stats[EF10_STAT_port_rx_gtjumbo] +
				stats[EF10_STAT_port_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
		core_stats->rx_frame_errors =
				stats[EF10_STAT_port_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}

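/* Copy the latest DMAed statistics into nic_data->stats, using the
 * generation counts written by the MC to detect a DMA that is still in
 * progress; returns -EAGAIN so the caller can retry.
 */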
static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	efx_ef10_get_stat_mask(efx, mask);

	dma_stats = efx->stats_buffer.addr;
	nic_data = efx->nic_data;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
			     stats, efx->stats_buffer.addr, false);
	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	/* Update derived statistics */
	efx_nic_fix_nodesc_drop_stat(efx,
				     &stats[EF10_STAT_port_rx_nodesc_drops]);
	stats[EF10_STAT_port_rx_good_bytes] =
		stats[EF10_STAT_port_rx_bytes] -
		stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
			     stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
	efx_update_sw_stats(efx, stats);
	return 0;
}

Daniel Pieczkod7788192015-06-02 11:39:20 +01001432static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1433 struct rtnl_link_stats64 *core_stats)
Ben Hutchings8127d662013-08-29 19:19:29 +01001434{
Ben Hutchings8127d662013-08-29 19:19:29 +01001435 int retry;
1436
1437	/* If we're unlucky enough to read statistics during the DMA, wait up
1438	 * to 10ms (100 polls of 100us) for it to finish (typically <500us)
1439	 */
1440 for (retry = 0; retry < 100; ++retry) {
Daniel Pieczkod7788192015-06-02 11:39:20 +01001441 if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
Ben Hutchings8127d662013-08-29 19:19:29 +01001442 break;
1443 udelay(100);
1444 }
1445
Daniel Pieczkod7788192015-06-02 11:39:20 +01001446 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1447}
1448
1449static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1450{
1451 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1452 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1453 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1454 __le64 generation_start, generation_end;
1455 u64 *stats = nic_data->stats;
1456 u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
1457 struct efx_buffer stats_buf;
1458 __le64 *dma_stats;
1459 int rc;
1460
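	/* The caller holds efx->stats_lock.  It is dropped here because the
	 * MCDI request below may sleep, and retaken on every exit path so
	 * that the caller's locking assumptions still hold.
	 */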
Daniel Pieczkof00bf232015-06-02 11:40:18 +01001461 spin_unlock_bh(&efx->stats_lock);
1462
1463 if (in_interrupt()) {
1464		/* If in atomic context, we cannot update the NIC stats.  Just
1465		 * update the software stats and return so the caller can continue.
1466 */
1467 spin_lock_bh(&efx->stats_lock);
1468 efx_update_sw_stats(efx, stats);
1469 return 0;
1470 }
1471
Daniel Pieczkod7788192015-06-02 11:39:20 +01001472 efx_ef10_get_stat_mask(efx, mask);
1473
1474 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
Daniel Pieczkof00bf232015-06-02 11:40:18 +01001475 if (rc) {
1476 spin_lock_bh(&efx->stats_lock);
Daniel Pieczkod7788192015-06-02 11:39:20 +01001477 return rc;
Daniel Pieczkof00bf232015-06-02 11:40:18 +01001478 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01001479
1480 dma_stats = stats_buf.addr;
1481 dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
1482
1483 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
1484 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01001485 MAC_STATS_IN_DMA, 1);
Daniel Pieczkod7788192015-06-02 11:39:20 +01001486 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
1487 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1488
Daniel Pieczko6dd48592015-06-02 11:39:49 +01001489 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
1490 NULL, 0, NULL);
Daniel Pieczkod7788192015-06-02 11:39:20 +01001491 spin_lock_bh(&efx->stats_lock);
Daniel Pieczko6dd48592015-06-02 11:39:49 +01001492 if (rc) {
1493 /* Expect ENOENT if DMA queues have not been set up */
1494 if (rc != -ENOENT || atomic_read(&efx->active_queues))
1495 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
1496 sizeof(inbuf), NULL, 0, rc);
Daniel Pieczkod7788192015-06-02 11:39:20 +01001497 goto out;
Daniel Pieczko6dd48592015-06-02 11:39:49 +01001498 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01001499
1500 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01001501 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
1502 WARN_ON_ONCE(1);
Daniel Pieczkod7788192015-06-02 11:39:20 +01001503 goto out;
Daniel Pieczko0fc95fc2015-06-02 11:39:33 +01001504 }
Daniel Pieczkod7788192015-06-02 11:39:20 +01001505 rmb();
1506 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1507 stats, stats_buf.addr, false);
1508 rmb();
1509 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1510 if (generation_end != generation_start) {
1511 rc = -EAGAIN;
1512 goto out;
Ben Hutchings8127d662013-08-29 19:19:29 +01001513 }
1514
Daniel Pieczkod7788192015-06-02 11:39:20 +01001515 efx_update_sw_stats(efx, stats);
1516out:
1517 efx_nic_free_buffer(efx, &stats_buf);
1518 return rc;
1519}
Ben Hutchings8127d662013-08-29 19:19:29 +01001520
Daniel Pieczkod7788192015-06-02 11:39:20 +01001521static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
1522 struct rtnl_link_stats64 *core_stats)
1523{
1524 if (efx_ef10_try_update_nic_stats_vf(efx))
1525 return 0;
1526
1527 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
Ben Hutchings8127d662013-08-29 19:19:29 +01001528}
1529
1530static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
1531{
1532 struct efx_nic *efx = channel->efx;
1533 unsigned int mode, value;
1534 efx_dword_t timer_cmd;
1535
1536 if (channel->irq_moderation) {
1537 mode = 3;
1538 value = channel->irq_moderation - 1;
1539 } else {
1540 mode = 0;
1541 value = 0;
1542 }
1543
1544 if (EFX_EF10_WORKAROUND_35388(efx)) {
1545 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
1546 EFE_DD_EVQ_IND_TIMER_FLAGS,
1547 ERF_DD_EVQ_IND_TIMER_MODE, mode,
1548 ERF_DD_EVQ_IND_TIMER_VAL, value);
1549 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
1550 channel->channel);
1551 } else {
1552 EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
1553 ERF_DZ_TC_TIMER_VAL, value);
1554 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
1555 channel->channel);
1556 }
1557}
1558
Shradha Shah02246a72015-05-06 00:58:14 +01001559static void efx_ef10_get_wol_vf(struct efx_nic *efx,
1560 struct ethtool_wolinfo *wol) {}
1561
1562static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
1563{
1564 return -EOPNOTSUPP;
1565}
1566
Ben Hutchings8127d662013-08-29 19:19:29 +01001567static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1568{
1569 wol->supported = 0;
1570 wol->wolopts = 0;
1571 memset(&wol->sopass, 0, sizeof(wol->sopass));
1572}
1573
1574static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
1575{
1576 if (type != 0)
1577 return -EINVAL;
1578 return 0;
1579}
1580
1581static void efx_ef10_mcdi_request(struct efx_nic *efx,
1582 const efx_dword_t *hdr, size_t hdr_len,
1583 const efx_dword_t *sdu, size_t sdu_len)
1584{
1585 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1586 u8 *pdu = nic_data->mcdi_buf.addr;
1587
1588 memcpy(pdu, hdr, hdr_len);
1589 memcpy(pdu + hdr_len, sdu, sdu_len);
1590 wmb();
1591
1592 /* The hardware provides 'low' and 'high' (doorbell) registers
1593 * for passing the 64-bit address of an MCDI request to
1594	 * firmware. However, the dwords are swapped by the firmware. The
1595 * least significant bits of the doorbell are then 0 for all
1596 * MCDI requests due to alignment.
1597 */
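	/* Hence the high dword of the address is written to the 'low'
	 * doorbell register and the low dword to the 'high' register.
	 */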
1598 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
1599 ER_DZ_MC_DB_LWRD);
1600 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
1601 ER_DZ_MC_DB_HWRD);
1602}
1603
1604static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
1605{
1606 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1607 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
1608
1609 rmb();
1610 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
1611}
1612
1613static void
1614efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
1615 size_t offset, size_t outlen)
1616{
1617 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1618 const u8 *pdu = nic_data->mcdi_buf.addr;
1619
1620 memcpy(outbuf, pdu + offset, outlen);
1621}
1622
Daniel Pieczkoc577e592015-10-09 10:40:35 +01001623static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
1624{
1625 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1626
1627 /* All our allocations have been reset */
1628 efx_ef10_reset_mc_allocations(efx);
1629
1630 /* The datapath firmware might have been changed */
1631 nic_data->must_check_datapath_caps = true;
1632
1633 /* MAC statistics have been cleared on the NIC; clear the local
1634 * statistic that we update with efx_update_diff_stat().
1635 */
1636 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
1637}
1638
Ben Hutchings8127d662013-08-29 19:19:29 +01001639static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
1640{
1641 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1642 int rc;
1643
1644 rc = efx_ef10_get_warm_boot_count(efx);
1645 if (rc < 0) {
1646 /* The firmware is presumably in the process of
1647 * rebooting. However, we are supposed to report each
1648		 * reboot just once, so we must defer reporting until we
1649		 * can read and store the updated warm boot count.
1650 */
1651 return 0;
1652 }
1653
1654 if (rc == nic_data->warm_boot_count)
1655 return 0;
1656
1657 nic_data->warm_boot_count = rc;
Daniel Pieczkoc577e592015-10-09 10:40:35 +01001658 efx_ef10_mcdi_reboot_detected(efx);
Ben Hutchings869070c2013-09-05 22:46:10 +01001659
Ben Hutchings8127d662013-08-29 19:19:29 +01001660 return -EIO;
1661}
1662
1663/* Handle an MSI interrupt
1664 *
1665 * Handle an MSI hardware interrupt. This routine schedules event
1666 * queue processing. No interrupt acknowledgement cycle is necessary.
1667 * Also, we never need to check that the interrupt is for us, since
1668 * MSI interrupts cannot be shared.
1669 */
1670static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
1671{
1672 struct efx_msi_context *context = dev_id;
1673 struct efx_nic *efx = context->efx;
1674
1675 netif_vdbg(efx, intr, efx->net_dev,
1676 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
1677
1678 if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
1679 /* Note test interrupts */
1680 if (context->index == efx->irq_level)
1681 efx->last_irq_cpu = raw_smp_processor_id();
1682
1683 /* Schedule processing of the channel */
1684 efx_schedule_channel_irq(efx->channel[context->index]);
1685 }
1686
1687 return IRQ_HANDLED;
1688}
1689
1690static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
1691{
1692 struct efx_nic *efx = dev_id;
1693 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1694 struct efx_channel *channel;
1695 efx_dword_t reg;
1696 u32 queues;
1697
1698 /* Read the ISR which also ACKs the interrupts */
1699 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
1700 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
1701
1702 if (queues == 0)
1703 return IRQ_NONE;
1704
1705 if (likely(soft_enabled)) {
1706 /* Note test interrupts */
1707 if (queues & (1U << efx->irq_level))
1708 efx->last_irq_cpu = raw_smp_processor_id();
1709
1710 efx_for_each_channel(channel, efx) {
1711 if (queues & 1)
1712 efx_schedule_channel_irq(channel);
1713 queues >>= 1;
1714 }
1715 }
1716
1717 netif_vdbg(efx, intr, efx->net_dev,
1718 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1719 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1720
1721 return IRQ_HANDLED;
1722}
1723
1724static void efx_ef10_irq_test_generate(struct efx_nic *efx)
1725{
1726 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
1727
1728 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
1729
1730 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
1731 (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
1732 inbuf, sizeof(inbuf), NULL, 0, NULL);
1733}
1734
1735static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
1736{
1737 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
1738 (tx_queue->ptr_mask + 1) *
1739 sizeof(efx_qword_t),
1740 GFP_KERNEL);
1741}
1742
1743/* This writes to the TX_DESC_WPTR and also pushes data */
1744static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
1745 const efx_qword_t *txd)
1746{
1747 unsigned int write_ptr;
1748 efx_oword_t reg;
1749
1750 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1751 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
1752 reg.qword[0] = *txd;
1753 efx_writeo_page(tx_queue->efx, &reg,
1754 ER_DZ_TX_DESC_UPD, tx_queue->queue);
1755}
1756
1757static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1758{
1759 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1760 EFX_BUF_SIZE));
Ben Hutchings8127d662013-08-29 19:19:29 +01001761 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
1762 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
1763 struct efx_channel *channel = tx_queue->channel;
1764 struct efx_nic *efx = tx_queue->efx;
Daniel Pieczko45b24492015-05-06 00:57:14 +01001765 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01001766 size_t inlen;
Ben Hutchings8127d662013-08-29 19:19:29 +01001767 dma_addr_t dma_addr;
1768 efx_qword_t *txd;
1769 int rc;
1770 int i;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01001771 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01001772
1773 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
1774 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
1775 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
1776 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
1777 MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
1778 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
1779 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
1780 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
Daniel Pieczko45b24492015-05-06 00:57:14 +01001781 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01001782
1783 dma_addr = tx_queue->txd.buf.dma_addr;
1784
1785 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
1786 tx_queue->queue, entries, (u64)dma_addr);
1787
1788 for (i = 0; i < entries; ++i) {
1789 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
1790 dma_addr += EFX_BUF_SIZE;
1791 }
1792
1793 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
1794
1795 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
Jon Cooperaa09a3d2015-05-20 11:10:41 +01001796 NULL, 0, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01001797 if (rc)
1798 goto fail;
1799
1800 /* A previous user of this TX queue might have set us up the
1801 * bomb by writing a descriptor to the TX push collector but
1802 * not the doorbell. (Each collector belongs to a port, not a
1803 * queue or function, so cannot easily be reset.) We must
1804 * attempt to push a no-op descriptor in its place.
1805 */
1806 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
1807 tx_queue->insert_count = 1;
1808 txd = efx_tx_desc(tx_queue, 0);
1809 EFX_POPULATE_QWORD_4(*txd,
1810 ESF_DZ_TX_DESC_IS_OPT, true,
1811 ESF_DZ_TX_OPTION_TYPE,
1812 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
1813 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
1814 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
1815 tx_queue->write_count = 1;
Bert Kenward93171b12015-11-30 09:05:35 +00001816
1817 if (nic_data->datapath_caps &
1818 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
1819 tx_queue->tso_version = 1;
1820 }
1821
Ben Hutchings8127d662013-08-29 19:19:29 +01001822 wmb();
1823 efx_ef10_push_tx_desc(tx_queue, txd);
1824
1825 return;
1826
1827fail:
Ben Hutchings48ce5632013-11-01 16:42:44 +00001828 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
1829 tx_queue->queue);
Ben Hutchings8127d662013-08-29 19:19:29 +01001830}
1831
1832static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
1833{
1834 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
Jon Cooperaa09a3d2015-05-20 11:10:41 +01001835 MCDI_DECLARE_BUF_ERR(outbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01001836 struct efx_nic *efx = tx_queue->efx;
1837 size_t outlen;
1838 int rc;
1839
1840 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
1841 tx_queue->queue);
1842
Edward Cree1e0b8122013-05-31 18:36:12 +01001843 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01001844 outbuf, sizeof(outbuf), &outlen);
1845
1846 if (rc && rc != -EALREADY)
1847 goto fail;
1848
1849 return;
1850
1851fail:
Edward Cree1e0b8122013-05-31 18:36:12 +01001852 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
1853 outbuf, outlen, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01001854}
1855
1856static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
1857{
1858 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
1859}
1860
1861/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
1862static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
1863{
1864 unsigned int write_ptr;
1865 efx_dword_t reg;
1866
1867 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1868 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
1869 efx_writed_page(tx_queue->efx, &reg,
1870 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
1871}
1872
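/* Write out any TX descriptors queued since the last call, then either push
 * the first new descriptor with the doorbell write (when
 * efx_nic_may_push_tx_desc() allows it) or simply notify the updated write
 * pointer.
 */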
1873static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
1874{
1875 unsigned int old_write_count = tx_queue->write_count;
1876 struct efx_tx_buffer *buffer;
1877 unsigned int write_ptr;
1878 efx_qword_t *txd;
1879
Martin Habetsb2663a42015-11-02 12:51:31 +00001880 tx_queue->xmit_more_available = false;
1881 if (unlikely(tx_queue->write_count == tx_queue->insert_count))
1882 return;
Ben Hutchings8127d662013-08-29 19:19:29 +01001883
1884 do {
1885 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1886 buffer = &tx_queue->buffer[write_ptr];
1887 txd = efx_tx_desc(tx_queue, write_ptr);
1888 ++tx_queue->write_count;
1889
1890 /* Create TX descriptor ring entry */
1891 if (buffer->flags & EFX_TX_BUF_OPTION) {
1892 *txd = buffer->option;
1893 } else {
1894 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
1895 EFX_POPULATE_QWORD_3(
1896 *txd,
1897 ESF_DZ_TX_KER_CONT,
1898 buffer->flags & EFX_TX_BUF_CONT,
1899 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
1900 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
1901 }
1902 } while (tx_queue->write_count != tx_queue->insert_count);
1903
1904 wmb(); /* Ensure descriptors are written before they are fetched */
1905
1906 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
1907 txd = efx_tx_desc(tx_queue,
1908 old_write_count & tx_queue->ptr_mask);
1909 efx_ef10_push_tx_desc(tx_queue, txd);
1910 ++tx_queue->pushes;
1911 } else {
1912 efx_ef10_notify_tx_desc(tx_queue);
1913 }
1914}
1915
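/* Allocate an RSS context.  An exclusive context is sized for the full
 * rss_spread; a shared context is rounded down to a power of two and capped
 * at EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE, and if only one queue would be
 * used no context is allocated at all.
 */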
Jon Cooper267c0152015-05-06 00:59:38 +01001916static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
1917 bool exclusive, unsigned *context_size)
Ben Hutchings8127d662013-08-29 19:19:29 +01001918{
1919 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
1920 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
Daniel Pieczko45b24492015-05-06 00:57:14 +01001921 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +01001922 size_t outlen;
1923 int rc;
Jon Cooper267c0152015-05-06 00:59:38 +01001924 u32 alloc_type = exclusive ?
1925 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
1926 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
1927 unsigned rss_spread = exclusive ?
1928 efx->rss_spread :
1929 min(rounddown_pow_of_two(efx->rss_spread),
1930 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
1931
1932 if (!exclusive && rss_spread == 1) {
1933 *context = EFX_EF10_RSS_CONTEXT_INVALID;
1934 if (context_size)
1935 *context_size = 1;
1936 return 0;
1937 }
Ben Hutchings8127d662013-08-29 19:19:29 +01001938
Jon Cooperdcb41232016-04-25 16:51:00 +01001939 if (nic_data->datapath_caps &
1940 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
1941 return -EOPNOTSUPP;
1942
Ben Hutchings8127d662013-08-29 19:19:29 +01001943 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
Daniel Pieczko45b24492015-05-06 00:57:14 +01001944 nic_data->vport_id);
Jon Cooper267c0152015-05-06 00:59:38 +01001945 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
1946 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
Ben Hutchings8127d662013-08-29 19:19:29 +01001947
1948 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
1949 outbuf, sizeof(outbuf), &outlen);
1950 if (rc != 0)
1951 return rc;
1952
1953 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
1954 return -EIO;
1955
1956 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
1957
Jon Cooper267c0152015-05-06 00:59:38 +01001958 if (context_size)
1959 *context_size = rss_spread;
1960
Ben Hutchings8127d662013-08-29 19:19:29 +01001961 return 0;
1962}
1963
1964static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
1965{
1966 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
1967 int rc;
1968
1969 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
1970 context);
1971
1972 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
1973 NULL, 0, NULL);
1974 WARN_ON(rc != 0);
1975}
1976
Jon Cooper267c0152015-05-06 00:59:38 +01001977static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
1978 const u32 *rx_indir_table)
Ben Hutchings8127d662013-08-29 19:19:29 +01001979{
1980 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1981 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1982 int i, rc;
1983
1984 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1985 context);
1986 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1987 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1988
1989 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1990 MCDI_PTR(tablebuf,
1991 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
Jon Cooper267c0152015-05-06 00:59:38 +01001992 (u8) rx_indir_table[i];
Ben Hutchings8127d662013-08-29 19:19:29 +01001993
1994 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1995 sizeof(tablebuf), NULL, 0, NULL);
1996 if (rc != 0)
1997 return rc;
1998
1999 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2000 context);
2001 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
2002 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2003 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
2004 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
2005 efx->rx_hash_key[i];
2006
2007 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2008 sizeof(keybuf), NULL, 0, NULL);
2009}
2010
2011static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2012{
2013 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2014
2015 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2016 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
2017 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
2018}
2019
Jon Cooper267c0152015-05-06 00:59:38 +01002020static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2021 unsigned *context_size)
2022{
2023 u32 new_rx_rss_context;
2024 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2025 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2026 false, context_size);
2027
2028 if (rc != 0)
2029 return rc;
2030
2031 nic_data->rx_rss_context = new_rx_rss_context;
2032 nic_data->rx_rss_context_exclusive = false;
2033 efx_set_default_rx_indir_table(efx);
2034 return 0;
2035}
2036
2037static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
2038 const u32 *rx_indir_table)
Ben Hutchings8127d662013-08-29 19:19:29 +01002039{
2040 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2041 int rc;
Jon Cooper267c0152015-05-06 00:59:38 +01002042 u32 new_rx_rss_context;
Ben Hutchings8127d662013-08-29 19:19:29 +01002043
Jon Cooper267c0152015-05-06 00:59:38 +01002044 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
2045 !nic_data->rx_rss_context_exclusive) {
2046 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2047 true, NULL);
2048 if (rc == -EOPNOTSUPP)
2049 return rc;
2050 else if (rc != 0)
2051 goto fail1;
2052 } else {
2053 new_rx_rss_context = nic_data->rx_rss_context;
Ben Hutchings8127d662013-08-29 19:19:29 +01002054 }
2055
Jon Cooper267c0152015-05-06 00:59:38 +01002056 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
2057 rx_indir_table);
Ben Hutchings8127d662013-08-29 19:19:29 +01002058 if (rc != 0)
Jon Cooper267c0152015-05-06 00:59:38 +01002059 goto fail2;
Ben Hutchings8127d662013-08-29 19:19:29 +01002060
Jon Cooper267c0152015-05-06 00:59:38 +01002061 if (nic_data->rx_rss_context != new_rx_rss_context)
2062 efx_ef10_rx_free_indir_table(efx);
2063 nic_data->rx_rss_context = new_rx_rss_context;
2064 nic_data->rx_rss_context_exclusive = true;
2065 if (rx_indir_table != efx->rx_indir_table)
2066 memcpy(efx->rx_indir_table, rx_indir_table,
2067 sizeof(efx->rx_indir_table));
2068 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01002069
Jon Cooper267c0152015-05-06 00:59:38 +01002070fail2:
2071 if (new_rx_rss_context != nic_data->rx_rss_context)
2072 efx_ef10_free_rss_context(efx, new_rx_rss_context);
2073fail1:
Ben Hutchings8127d662013-08-29 19:19:29 +01002074 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
Jon Cooper267c0152015-05-06 00:59:38 +01002075 return rc;
2076}
2077
2078static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
2079 const u32 *rx_indir_table)
2080{
2081 int rc;
2082
2083 if (efx->rss_spread == 1)
2084 return 0;
2085
2086 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
2087
2088 if (rc == -ENOBUFS && !user) {
2089 unsigned context_size;
2090 bool mismatch = false;
2091 size_t i;
2092
2093 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
2094 i++)
2095 mismatch = rx_indir_table[i] !=
2096 ethtool_rxfh_indir_default(i, efx->rss_spread);
2097
2098 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
2099 if (rc == 0) {
2100 if (context_size != efx->rss_spread)
2101 netif_warn(efx, probe, efx->net_dev,
2102 "Could not allocate an exclusive RSS"
2103 " context; allocated a shared one of"
2104 " different size."
2105 " Wanted %u, got %u.\n",
2106 efx->rss_spread, context_size);
2107 else if (mismatch)
2108 netif_warn(efx, probe, efx->net_dev,
2109 "Could not allocate an exclusive RSS"
2110 " context; allocated a shared one but"
2111 " could not apply custom"
2112 " indirection.\n");
2113 else
2114 netif_info(efx, probe, efx->net_dev,
2115 "Could not allocate an exclusive RSS"
2116 " context; allocated a shared one.\n");
2117 }
2118 }
2119 return rc;
2120}
2121
2122static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
2123 const u32 *rx_indir_table
2124 __attribute__ ((unused)))
2125{
2126 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2127
2128 if (user)
2129 return -EOPNOTSUPP;
2130 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2131 return 0;
2132 return efx_ef10_rx_push_shared_rss_config(efx, NULL);
Ben Hutchings8127d662013-08-29 19:19:29 +01002133}
2134
2135static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
2136{
2137 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
2138 (rx_queue->ptr_mask + 1) *
2139 sizeof(efx_qword_t),
2140 GFP_KERNEL);
2141}
2142
2143static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
2144{
2145 MCDI_DECLARE_BUF(inbuf,
2146 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2147 EFX_BUF_SIZE));
Ben Hutchings8127d662013-08-29 19:19:29 +01002148 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2149 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
2150 struct efx_nic *efx = rx_queue->efx;
Daniel Pieczko45b24492015-05-06 00:57:14 +01002151 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002152 size_t inlen;
Ben Hutchings8127d662013-08-29 19:19:29 +01002153 dma_addr_t dma_addr;
2154 int rc;
2155 int i;
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002156 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01002157
2158 rx_queue->scatter_n = 0;
2159 rx_queue->scatter_len = 0;
2160
2161 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
2162 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
2163 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
2164 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
2165 efx_rx_queue_index(rx_queue));
Jon Cooperbd9a2652013-11-18 12:54:41 +00002166 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
2167 INIT_RXQ_IN_FLAG_PREFIX, 1,
2168 INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
Ben Hutchings8127d662013-08-29 19:19:29 +01002169 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
Daniel Pieczko45b24492015-05-06 00:57:14 +01002170 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01002171
2172 dma_addr = rx_queue->rxd.buf.dma_addr;
2173
2174 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
2175 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
2176
2177 for (i = 0; i < entries; ++i) {
2178 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
2179 dma_addr += EFX_BUF_SIZE;
2180 }
2181
2182 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
2183
2184 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002185 NULL, 0, NULL);
Ben Hutchings48ce5632013-11-01 16:42:44 +00002186 if (rc)
2187 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
2188 efx_rx_queue_index(rx_queue));
Ben Hutchings8127d662013-08-29 19:19:29 +01002189}
2190
2191static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
2192{
2193 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
Jon Cooperaa09a3d2015-05-20 11:10:41 +01002194 MCDI_DECLARE_BUF_ERR(outbuf);
Ben Hutchings8127d662013-08-29 19:19:29 +01002195 struct efx_nic *efx = rx_queue->efx;
2196 size_t outlen;
2197 int rc;
2198
2199 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
2200 efx_rx_queue_index(rx_queue));
2201
Edward Cree1e0b8122013-05-31 18:36:12 +01002202 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
Ben Hutchings8127d662013-08-29 19:19:29 +01002203 outbuf, sizeof(outbuf), &outlen);
2204
2205 if (rc && rc != -EALREADY)
2206 goto fail;
2207
2208 return;
2209
2210fail:
Edward Cree1e0b8122013-05-31 18:36:12 +01002211 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
2212 outbuf, outlen, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01002213}
2214
2215static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
2216{
2217 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
2218}
2219
2220/* This creates an entry in the RX descriptor queue */
2221static inline void
2222efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
2223{
2224 struct efx_rx_buffer *rx_buf;
2225 efx_qword_t *rxd;
2226
2227 rxd = efx_rx_desc(rx_queue, index);
2228 rx_buf = efx_rx_buffer(rx_queue, index);
2229 EFX_POPULATE_QWORD_2(*rxd,
2230 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
2231 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
2232}
2233
2234static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
2235{
2236 struct efx_nic *efx = rx_queue->efx;
2237 unsigned int write_count;
2238 efx_dword_t reg;
2239
2240 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
2241 write_count = rx_queue->added_count & ~7;
2242 if (rx_queue->notified_count == write_count)
2243 return;
2244
2245 do
2246 efx_ef10_build_rx_desc(
2247 rx_queue,
2248 rx_queue->notified_count & rx_queue->ptr_mask);
2249 while (++rx_queue->notified_count != write_count);
2250
2251 wmb();
2252 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
2253 write_count & rx_queue->ptr_mask);
2254 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
2255 efx_rx_queue_index(rx_queue));
2256}
2257
2258static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
2259
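/* Defer an RX queue refill by asking the MC to loop a driver-generated event
 * back to this channel's event queue; the refill then happens in event
 * processing context when that event arrives.
 */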
2260static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
2261{
2262 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2263 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2264 efx_qword_t event;
2265
2266 EFX_POPULATE_QWORD_2(event,
2267 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2268 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
2269
2270 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2271
2272 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2273 * already swapped the data to little-endian order.
2274 */
2275 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2276 sizeof(efx_qword_t));
2277
2278 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
2279 inbuf, sizeof(inbuf), 0,
2280 efx_ef10_rx_defer_refill_complete, 0);
2281}
2282
2283static void
2284efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
2285 int rc, efx_dword_t *outbuf,
2286 size_t outlen_actual)
2287{
2288 /* nothing to do */
2289}
2290
2291static int efx_ef10_ev_probe(struct efx_channel *channel)
2292{
2293 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
2294 (channel->eventq_mask + 1) *
2295 sizeof(efx_qword_t),
2296 GFP_KERNEL);
2297}
2298
Daniel Pieczko46e612b2015-07-21 15:09:18 +01002299static void efx_ef10_ev_fini(struct efx_channel *channel)
2300{
2301 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
2302 MCDI_DECLARE_BUF_ERR(outbuf);
2303 struct efx_nic *efx = channel->efx;
2304 size_t outlen;
2305 int rc;
2306
2307 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
2308
2309 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
2310 outbuf, sizeof(outbuf), &outlen);
2311
2312 if (rc && rc != -EALREADY)
2313 goto fail;
2314
2315 return;
2316
2317fail:
2318 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
2319 outbuf, outlen, rc);
2320}
2321
Ben Hutchings8127d662013-08-29 19:19:29 +01002322static int efx_ef10_ev_init(struct efx_channel *channel)
2323{
2324 MCDI_DECLARE_BUF(inbuf,
2325 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
2326 EFX_BUF_SIZE));
2327 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
2328 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
2329 struct efx_nic *efx = channel->efx;
2330 struct efx_ef10_nic_data *nic_data;
2331 bool supports_rx_merge;
2332 size_t inlen, outlen;
Daniel Pieczko46e612b2015-07-21 15:09:18 +01002333 unsigned int enabled, implemented;
Ben Hutchings8127d662013-08-29 19:19:29 +01002334 dma_addr_t dma_addr;
2335 int rc;
2336 int i;
2337
2338 nic_data = efx->nic_data;
2339 supports_rx_merge =
2340 !!(nic_data->datapath_caps &
2341 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
2342
2343 /* Fill event queue with all ones (i.e. empty events) */
2344 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
2345
2346 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
2347 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
2348 /* INIT_EVQ expects index in vector table, not absolute */
2349 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
2350 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
2351 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
2352 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
2353 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
2354 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
2355 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
2356 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
2357 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
2358 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
2359 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
2360 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
2361 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
2362
2363 dma_addr = channel->eventq.buf.dma_addr;
2364 for (i = 0; i < entries; ++i) {
2365 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
2366 dma_addr += EFX_BUF_SIZE;
2367 }
2368
2369 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
2370
2371 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
2372 outbuf, sizeof(outbuf), &outlen);
Ben Hutchings8127d662013-08-29 19:19:29 +01002373 /* IRQ return is ignored */
Daniel Pieczko46e612b2015-07-21 15:09:18 +01002374 if (channel->channel || rc)
2375 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01002376
Daniel Pieczko46e612b2015-07-21 15:09:18 +01002377 /* Successfully created event queue on channel 0 */
2378 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
Edward Cree832dc9e2015-07-21 15:09:31 +01002379 if (rc == -ENOSYS) {
2380 /* GET_WORKAROUNDS was implemented before the bug26807
2381 * workaround, thus the latter must be unavailable in this fw
2382 */
2383 nic_data->workaround_26807 = false;
2384 rc = 0;
2385 } else if (rc) {
Ben Hutchings8127d662013-08-29 19:19:29 +01002386 goto fail;
Edward Cree832dc9e2015-07-21 15:09:31 +01002387 } else {
2388 nic_data->workaround_26807 =
2389 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
Ben Hutchings8127d662013-08-29 19:19:29 +01002390
Edward Cree832dc9e2015-07-21 15:09:31 +01002391 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
2392 !nic_data->workaround_26807) {
Daniel Pieczko5a55a722015-07-21 15:10:02 +01002393 unsigned int flags;
2394
Daniel Pieczko34ccfe62015-07-21 15:09:43 +01002395 rc = efx_mcdi_set_workaround(efx,
2396 MC_CMD_WORKAROUND_BUG26807,
Daniel Pieczko5a55a722015-07-21 15:10:02 +01002397 true, &flags);
2398
2399 if (!rc) {
2400 if (flags &
2401 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
2402 netif_info(efx, drv, efx->net_dev,
2403 "other functions on NIC have been reset\n");
Daniel Pieczkoabd86a52015-12-04 08:48:39 +00002404
2405 /* With MCFW v4.6.x and earlier, the
2406 * boot count will have incremented,
2407 * so re-read the warm_boot_count
2408 * value now to ensure this function
2409 * doesn't think it has changed next
2410 * time it checks.
2411 */
2412 rc = efx_ef10_get_warm_boot_count(efx);
2413 if (rc >= 0) {
2414 nic_data->warm_boot_count = rc;
2415 rc = 0;
2416 }
Daniel Pieczko5a55a722015-07-21 15:10:02 +01002417 }
Edward Cree832dc9e2015-07-21 15:09:31 +01002418 nic_data->workaround_26807 = true;
Daniel Pieczko5a55a722015-07-21 15:10:02 +01002419 } else if (rc == -EPERM) {
Edward Cree832dc9e2015-07-21 15:09:31 +01002420 rc = 0;
Daniel Pieczko5a55a722015-07-21 15:10:02 +01002421 }
Edward Cree832dc9e2015-07-21 15:09:31 +01002422 }
Daniel Pieczko46e612b2015-07-21 15:09:18 +01002423 }
2424
2425 if (!rc)
2426 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01002427
2428fail:
Daniel Pieczko46e612b2015-07-21 15:09:18 +01002429 efx_ef10_ev_fini(channel);
2430 return rc;
Ben Hutchings8127d662013-08-29 19:19:29 +01002431}
2432
2433static void efx_ef10_ev_remove(struct efx_channel *channel)
2434{
2435 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
2436}
2437
2438static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
2439 unsigned int rx_queue_label)
2440{
2441 struct efx_nic *efx = rx_queue->efx;
2442
2443 netif_info(efx, hw, efx->net_dev,
2444 "rx event arrived on queue %d labeled as queue %u\n",
2445 efx_rx_queue_index(rx_queue), rx_queue_label);
2446
2447 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2448}
2449
2450static void
2451efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
2452 unsigned int actual, unsigned int expected)
2453{
2454 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
2455 struct efx_nic *efx = rx_queue->efx;
2456
2457 netif_info(efx, hw, efx->net_dev,
2458 "dropped %d events (index=%d expected=%d)\n",
2459 dropped, actual, expected);
2460
2461 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2462}
2463
2464/* A partially received RX packet was aborted; clean up. */
2465static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
2466{
2467 unsigned int rx_desc_ptr;
2468
Ben Hutchings8127d662013-08-29 19:19:29 +01002469 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
2470 "scattered RX aborted (dropping %u buffers)\n",
2471 rx_queue->scatter_n);
2472
2473 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
2474
2475 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
2476 0, EFX_RX_PKT_DISCARD);
2477
2478 rx_queue->removed_count += rx_queue->scatter_n;
2479 rx_queue->scatter_n = 0;
2480 rx_queue->scatter_len = 0;
2481 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
2482}
2483
2484static int efx_ef10_handle_rx_event(struct efx_channel *channel,
2485 const efx_qword_t *event)
2486{
2487 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
2488 unsigned int n_descs, n_packets, i;
2489 struct efx_nic *efx = channel->efx;
2490 struct efx_rx_queue *rx_queue;
2491 bool rx_cont;
2492 u16 flags = 0;
2493
2494 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2495 return 0;
2496
2497 /* Basic packet information */
2498 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
2499 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
2500 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
2501 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
2502 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
2503
Ben Hutchings48ce5632013-11-01 16:42:44 +00002504 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
2505 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
2506 EFX_QWORD_FMT "\n",
2507 EFX_QWORD_VAL(*event));
Ben Hutchings8127d662013-08-29 19:19:29 +01002508
2509 rx_queue = efx_channel_get_rx_queue(channel);
2510
2511 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
2512 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
2513
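	/* The event carries only the low ESF_DZ_RX_DSC_PTR_LBITS_WIDTH bits
	 * of the next descriptor pointer, so the number of descriptors
	 * completed since the last event is recovered modulo that width.
	 */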
2514 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
2515 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2516
2517 if (n_descs != rx_queue->scatter_n + 1) {
Ben Hutchings92a04162013-09-24 23:21:57 +01002518 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2519
Ben Hutchings8127d662013-08-29 19:19:29 +01002520 /* detect rx abort */
2521 if (unlikely(n_descs == rx_queue->scatter_n)) {
Ben Hutchings48ce5632013-11-01 16:42:44 +00002522 if (rx_queue->scatter_n == 0 || rx_bytes != 0)
2523 netdev_WARN(efx->net_dev,
2524 "invalid RX abort: scatter_n=%u event="
2525 EFX_QWORD_FMT "\n",
2526 rx_queue->scatter_n,
2527 EFX_QWORD_VAL(*event));
Ben Hutchings8127d662013-08-29 19:19:29 +01002528 efx_ef10_handle_rx_abort(rx_queue);
2529 return 0;
2530 }
2531
Ben Hutchings92a04162013-09-24 23:21:57 +01002532 /* Check that RX completion merging is valid, i.e.
2533 * the current firmware supports it and this is a
2534 * non-scattered packet.
2535 */
2536 if (!(nic_data->datapath_caps &
2537 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
2538 rx_queue->scatter_n != 0 || rx_cont) {
Ben Hutchings8127d662013-08-29 19:19:29 +01002539 efx_ef10_handle_rx_bad_lbits(
2540 rx_queue, next_ptr_lbits,
2541 (rx_queue->removed_count +
2542 rx_queue->scatter_n + 1) &
2543 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2544 return 0;
2545 }
2546
2547 /* Merged completion for multiple non-scattered packets */
2548 rx_queue->scatter_n = 1;
2549 rx_queue->scatter_len = 0;
2550 n_packets = n_descs;
2551 ++channel->n_rx_merge_events;
2552 channel->n_rx_merge_packets += n_packets;
2553 flags |= EFX_RX_PKT_PREFIX_LEN;
2554 } else {
2555 ++rx_queue->scatter_n;
2556 rx_queue->scatter_len += rx_bytes;
2557 if (rx_cont)
2558 return 0;
2559 n_packets = 1;
2560 }
2561
2562 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
2563 flags |= EFX_RX_PKT_DISCARD;
2564
2565 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
2566 channel->n_rx_ip_hdr_chksum_err += n_packets;
2567 } else if (unlikely(EFX_QWORD_FIELD(*event,
2568 ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
2569 channel->n_rx_tcp_udp_chksum_err += n_packets;
2570 } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
2571 rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
2572 flags |= EFX_RX_PKT_CSUMMED;
2573 }
2574
2575 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
2576 flags |= EFX_RX_PKT_TCP;
2577
2578 channel->irq_mod_score += 2 * n_packets;
2579
2580 /* Handle received packet(s) */
2581 for (i = 0; i < n_packets; i++) {
2582 efx_rx_packet(rx_queue,
2583 rx_queue->removed_count & rx_queue->ptr_mask,
2584 rx_queue->scatter_n, rx_queue->scatter_len,
2585 flags);
2586 rx_queue->removed_count += rx_queue->scatter_n;
2587 }
2588
2589 rx_queue->scatter_n = 0;
2590 rx_queue->scatter_len = 0;
2591
2592 return n_packets;
2593}
2594
2595static int
2596efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
2597{
2598 struct efx_nic *efx = channel->efx;
2599 struct efx_tx_queue *tx_queue;
2600 unsigned int tx_ev_desc_ptr;
2601 unsigned int tx_ev_q_label;
2602 int tx_descs = 0;
2603
2604 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2605 return 0;
2606
2607 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
2608 return 0;
2609
2610 /* Transmit completion */
2611 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
2612 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
2613 tx_queue = efx_channel_get_tx_queue(channel,
2614 tx_ev_q_label % EFX_TXQ_TYPES);
2615 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
2616 tx_queue->ptr_mask);
2617 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
2618
2619 return tx_descs;
2620}
2621
2622static void
2623efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
2624{
2625 struct efx_nic *efx = channel->efx;
2626 int subcode;
2627
2628 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
2629
2630 switch (subcode) {
2631 case ESE_DZ_DRV_TIMER_EV:
2632 case ESE_DZ_DRV_WAKE_UP_EV:
2633 break;
2634 case ESE_DZ_DRV_START_UP_EV:
2635 /* event queue init complete. ok. */
2636		/* Event queue init complete; nothing to do. */
2637 default:
2638 netif_err(efx, hw, efx->net_dev,
2639 "channel %d unknown driver event type %d"
2640 " (data " EFX_QWORD_FMT ")\n",
2641 channel->channel, subcode,
2642 EFX_QWORD_VAL(*event));
2643
2644 }
2645}
2646
2647static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
2648 efx_qword_t *event)
2649{
2650 struct efx_nic *efx = channel->efx;
2651 u32 subcode;
2652
2653 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
2654
2655 switch (subcode) {
2656 case EFX_EF10_TEST:
2657 channel->event_test_cpu = raw_smp_processor_id();
2658 break;
2659 case EFX_EF10_REFILL:
2660 /* The queue must be empty, so we won't receive any rx
2661		/* The queue must be empty, so we won't receive any RX
2662		 * events, so efx_process_channel() won't refill the
2663		 * queue.  Refill it here. */
Jon Coopercce28792013-10-02 11:04:14 +01002664 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
Ben Hutchings8127d662013-08-29 19:19:29 +01002665 break;
2666 default:
2667 netif_err(efx, hw, efx->net_dev,
2668 "channel %d unknown driver event type %u"
2669 " (data " EFX_QWORD_FMT ")\n",
2670 channel->channel, (unsigned) subcode,
2671 EFX_QWORD_VAL(*event));
2672 }
2673}
2674
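/* NAPI-style event processing: consume up to quota events, overwriting each
 * one behind us with the all-ones "empty" pattern, and return the number of
 * events spent.  A single RX event may complete several merged packets, so
 * one event can exhaust the remaining quota.
 */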
2675static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
2676{
2677 struct efx_nic *efx = channel->efx;
2678 efx_qword_t event, *p_event;
2679 unsigned int read_ptr;
2680 int ev_code;
2681 int tx_descs = 0;
2682 int spent = 0;
2683
Eric W. Biederman75363a42014-03-14 18:11:22 -07002684 if (quota <= 0)
2685 return spent;
2686
Ben Hutchings8127d662013-08-29 19:19:29 +01002687 read_ptr = channel->eventq_read_ptr;
2688
2689 for (;;) {
2690 p_event = efx_event(channel, read_ptr);
2691 event = *p_event;
2692
2693 if (!efx_event_present(&event))
2694 break;
2695
2696 EFX_SET_QWORD(*p_event);
2697
2698 ++read_ptr;
2699
2700 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
2701
2702 netif_vdbg(efx, drv, efx->net_dev,
2703 "processing event on %d " EFX_QWORD_FMT "\n",
2704 channel->channel, EFX_QWORD_VAL(event));
2705
2706 switch (ev_code) {
2707 case ESE_DZ_EV_CODE_MCDI_EV:
2708 efx_mcdi_process_event(channel, &event);
2709 break;
2710 case ESE_DZ_EV_CODE_RX_EV:
2711 spent += efx_ef10_handle_rx_event(channel, &event);
2712 if (spent >= quota) {
2713 /* XXX can we split a merged event to
2714 * avoid going over-quota?
2715 */
2716 spent = quota;
2717 goto out;
2718 }
2719 break;
2720 case ESE_DZ_EV_CODE_TX_EV:
2721 tx_descs += efx_ef10_handle_tx_event(channel, &event);
2722 if (tx_descs > efx->txq_entries) {
2723 spent = quota;
2724 goto out;
2725 } else if (++spent == quota) {
2726 goto out;
2727 }
2728 break;
2729 case ESE_DZ_EV_CODE_DRIVER_EV:
2730 efx_ef10_handle_driver_event(channel, &event);
2731 if (++spent == quota)
2732 goto out;
2733 break;
2734 case EFX_EF10_DRVGEN_EV:
2735 efx_ef10_handle_driver_generated_event(channel, &event);
2736 break;
2737 default:
2738 netif_err(efx, hw, efx->net_dev,
2739 "channel %d unknown event type %d"
2740 " (data " EFX_QWORD_FMT ")\n",
2741 channel->channel, ev_code,
2742 EFX_QWORD_VAL(event));
2743 }
2744 }
2745
2746out:
2747 channel->eventq_read_ptr = read_ptr;
2748 return spent;
2749}
2750
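/* Acknowledge processed events by writing the masked read pointer back to
 * the EVQ read-pointer register.  Under workaround 35388 the pointer is
 * wider than the indirect register's field, so it is written in two halves,
 * high bits first.
 */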
2751static void efx_ef10_ev_read_ack(struct efx_channel *channel)
2752{
2753 struct efx_nic *efx = channel->efx;
2754 efx_dword_t rptr;
2755
2756 if (EFX_EF10_WORKAROUND_35388(efx)) {
2757 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
2758 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
2759 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
2760 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
2761
2762 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2763 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
2764 ERF_DD_EVQ_IND_RPTR,
2765 (channel->eventq_read_ptr &
2766 channel->eventq_mask) >>
2767 ERF_DD_EVQ_IND_RPTR_WIDTH);
2768 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2769 channel->channel);
2770 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2771 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
2772 ERF_DD_EVQ_IND_RPTR,
2773 channel->eventq_read_ptr &
2774 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
2775 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2776 channel->channel);
2777 } else {
2778 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
2779 channel->eventq_read_ptr &
2780 channel->eventq_mask);
2781 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
2782 }
2783}
2784
2785static void efx_ef10_ev_test_generate(struct efx_channel *channel)
2786{
2787 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2788 struct efx_nic *efx = channel->efx;
2789 efx_qword_t event;
2790 int rc;
2791
2792 EFX_POPULATE_QWORD_2(event,
2793 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2794 ESF_DZ_EV_DATA, EFX_EF10_TEST);
2795
2796 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2797
2798 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2799 * already swapped the data to little-endian order.
2800 */
2801 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2802 sizeof(efx_qword_t));
2803
2804 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
2805 NULL, 0, NULL);
2806 if (rc != 0)
2807 goto fail;
2808
2809 return;
2810
2811fail:
2812 WARN_ON(true);
2813 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2814}
2815
2816void efx_ef10_handle_drain_event(struct efx_nic *efx)
2817{
2818 if (atomic_dec_and_test(&efx->active_queues))
2819 wake_up(&efx->flush_wq);
2820
2821 WARN_ON(atomic_read(&efx->active_queues) < 0);
2822}
2823
2824static int efx_ef10_fini_dmaq(struct efx_nic *efx)
2825{
2826 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2827 struct efx_channel *channel;
2828 struct efx_tx_queue *tx_queue;
2829 struct efx_rx_queue *rx_queue;
2830 int pending;
2831
2832 /* If the MC has just rebooted, the TX/RX queues will have already been
2833 * torn down, but efx->active_queues needs to be set to zero.
2834 */
2835 if (nic_data->must_realloc_vis) {
2836 atomic_set(&efx->active_queues, 0);
2837 return 0;
2838 }
2839
2840 /* Do not attempt to write to the NIC during EEH recovery */
2841 if (efx->state != STATE_RECOVERY) {
2842 efx_for_each_channel(channel, efx) {
2843 efx_for_each_channel_rx_queue(rx_queue, channel)
2844 efx_ef10_rx_fini(rx_queue);
2845 efx_for_each_channel_tx_queue(tx_queue, channel)
2846 efx_ef10_tx_fini(tx_queue);
2847 }
2848
2849 wait_event_timeout(efx->flush_wq,
2850 atomic_read(&efx->active_queues) == 0,
2851 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
2852 pending = atomic_read(&efx->active_queues);
2853 if (pending) {
2854 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
2855 pending);
2856 return -ETIMEDOUT;
2857 }
2858 }
2859
2860 return 0;
2861}
2862
Edward Creee2835462014-04-16 19:27:48 +01002863static void efx_ef10_prepare_flr(struct efx_nic *efx)
2864{
2865 atomic_set(&efx->active_queues, 0);
2866}
2867
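/* A filter spec's match values all live in the tail of the structure from
 * outer_vid onwards; efx_ef10_filter_equal() memcmp()s that tail (plus the
 * match_flags and RX/TX flags) and efx_ef10_filter_hash() hashes it.
 */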
Ben Hutchings8127d662013-08-29 19:19:29 +01002868static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
2869 const struct efx_filter_spec *right)
2870{
2871 if ((left->match_flags ^ right->match_flags) |
2872 ((left->flags ^ right->flags) &
2873 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
2874 return false;
2875
2876 return memcmp(&left->outer_vid, &right->outer_vid,
2877 sizeof(struct efx_filter_spec) -
2878 offsetof(struct efx_filter_spec, outer_vid)) == 0;
2879}
2880
2881static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
2882{
2883 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
2884 return jhash2((const u32 *)&spec->outer_vid,
2885 (sizeof(struct efx_filter_spec) -
2886 offsetof(struct efx_filter_spec, outer_vid)) / 4,
2887 0);
2888 /* XXX should we randomise the initval? */
2889}
2890
2891/* Decide whether a filter should be exclusive or should instead allow
2892 * delivery to additional recipients.  Currently we decide that
2893 * filters for specific local unicast MAC and IP addresses are
2894 * exclusive.
2895 */
2896static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
2897{
2898 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
2899 !is_multicast_ether_addr(spec->loc_mac))
2900 return true;
2901
2902 if ((spec->match_flags &
2903 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
2904 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
2905 if (spec->ether_type == htons(ETH_P_IP) &&
2906 !ipv4_is_multicast(spec->loc_host[0]))
2907 return true;
2908 if (spec->ether_type == htons(ETH_P_IPV6) &&
2909 ((const u8 *)spec->loc_host)[0] != 0xff)
2910 return true;
2911 }
2912
2913 return false;
2914}
2915
2916static struct efx_filter_spec *
2917efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
2918 unsigned int filter_idx)
2919{
2920 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
2921 ~EFX_EF10_FILTER_FLAGS);
2922}
2923
2924static unsigned int
2925efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
2926 unsigned int filter_idx)
2927{
2928 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
2929}
2930
2931static void
2932efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
2933 unsigned int filter_idx,
2934 const struct efx_filter_spec *spec,
2935 unsigned int flags)
2936{
2937 table->entry[filter_idx].spec = (unsigned long)spec | flags;
2938}
2939
2940static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2941 const struct efx_filter_spec *spec,
2942 efx_dword_t *inbuf, u64 handle,
2943 bool replacing)
2944{
2945 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Jon Cooperdcb41232016-04-25 16:51:00 +01002946 u32 flags = spec->flags;
Ben Hutchings8127d662013-08-29 19:19:29 +01002947
2948 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
2949
Jon Cooperdcb41232016-04-25 16:51:00 +01002950 /* Remove RSS flag if we don't have an RSS context. */
2951 if (flags & EFX_FILTER_FLAG_RX_RSS &&
2952 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
2953 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
2954 flags &= ~EFX_FILTER_FLAG_RX_RSS;
2955
Ben Hutchings8127d662013-08-29 19:19:29 +01002956 if (replacing) {
2957 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2958 MC_CMD_FILTER_OP_IN_OP_REPLACE);
2959 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
2960 } else {
2961 u32 match_fields = 0;
2962
2963 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2964 efx_ef10_filter_is_exclusive(spec) ?
2965 MC_CMD_FILTER_OP_IN_OP_INSERT :
2966 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
2967
2968 /* Convert match flags and values. Unlike almost
2969 * everything else in MCDI, these fields are in
2970 * network byte order.
2971 */
2972 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
2973 match_fields |=
2974 is_multicast_ether_addr(spec->loc_mac) ?
2975 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
2976 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
2977#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
2978 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
2979 match_fields |= \
2980 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2981 mcdi_field ## _LBN; \
2982 BUILD_BUG_ON( \
2983 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
2984 sizeof(spec->gen_field)); \
2985 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
2986 &spec->gen_field, sizeof(spec->gen_field)); \
2987 }
2988 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
2989 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
2990 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
2991 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
2992 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
2993 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
2994 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
2995 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
2996 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
2997 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
2998#undef COPY_FIELD
2999 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
3000 match_fields);
3001 }
3002
Daniel Pieczko45b24492015-05-06 00:57:14 +01003003 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01003004 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
3005 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3006 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
3007 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
Shradha Shahe3d36292015-05-06 00:56:24 +01003008 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
Ben Hutchings8127d662013-08-29 19:19:29 +01003009 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
3010 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
Ben Hutchingsa0bc3482013-12-16 18:56:24 +00003011 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
3012 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
3013 0 : spec->dmaq_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01003014 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
Jon Cooperdcb41232016-04-25 16:51:00 +01003015 (flags & EFX_FILTER_FLAG_RX_RSS) ?
Ben Hutchings8127d662013-08-29 19:19:29 +01003016 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
3017 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
Jon Cooperdcb41232016-04-25 16:51:00 +01003018 if (flags & EFX_FILTER_FLAG_RX_RSS)
Ben Hutchings8127d662013-08-29 19:19:29 +01003019 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
3020 spec->rss_context !=
3021 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
3022 spec->rss_context : nic_data->rx_rss_context);
3023}
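/* A note on the MCDI op selection above: REPLACE reuses the firmware
 * handle passed in by the caller, while a fresh insertion uses INSERT
 * for exclusive (single-recipient) filters and SUBSCRIBE for multicast
 * recipients, so that more than one recipient can subscribe to the
 * same traffic.  The handle for a newly inserted filter comes back in
 * FILTER_OP_OUT_HANDLE (see efx_ef10_filter_push() below).
 */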
3024
3025static int efx_ef10_filter_push(struct efx_nic *efx,
3026 const struct efx_filter_spec *spec,
3027 u64 *handle, bool replacing)
3028{
3029 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3030 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
3031 int rc;
3032
3033 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
3034 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3035 outbuf, sizeof(outbuf), NULL);
3036 if (rc == 0)
3037 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
Ben Hutchings065e64c2013-10-09 14:17:27 +01003038 if (rc == -ENOSPC)
3039 rc = -EBUSY; /* to match efx_farch_filter_insert() */
Ben Hutchings8127d662013-08-29 19:19:29 +01003040 return rc;
3041}
3042
3043static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
3044 enum efx_filter_match_flags match_flags)
3045{
3046 unsigned int match_pri;
3047
3048 for (match_pri = 0;
3049 match_pri < table->rx_match_count;
3050 match_pri++)
3051 if (table->rx_match_flags[match_pri] == match_flags)
3052 return match_pri;
3053
3054 return -EPROTONOSUPPORT;
3055}
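/* Filter IDs handed back to callers encode both values involved here:
 *	id = match_pri * HUNT_FILTER_TBL_ROWS + table_index
 * which lets efx_ef10_filter_remove_internal() cross-check the match
 * priority via id / HUNT_FILTER_TBL_ROWS and recover the table row via
 * id % HUNT_FILTER_TBL_ROWS.
 */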
3056
3057static s32 efx_ef10_filter_insert(struct efx_nic *efx,
3058 struct efx_filter_spec *spec,
3059 bool replace_equal)
3060{
3061 struct efx_ef10_filter_table *table = efx->filter_state;
3062 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3063 struct efx_filter_spec *saved_spec;
3064 unsigned int match_pri, hash;
3065 unsigned int priv_flags;
3066 bool replacing = false;
3067 int ins_index = -1;
3068 DEFINE_WAIT(wait);
3069 bool is_mc_recip;
3070 s32 rc;
3071
3072 /* For now, only support RX filters */
3073 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
3074 EFX_FILTER_FLAG_RX)
3075 return -EINVAL;
3076
3077 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
3078 if (rc < 0)
3079 return rc;
3080 match_pri = rc;
3081
3082 hash = efx_ef10_filter_hash(spec);
3083 is_mc_recip = efx_filter_is_mc_recipient(spec);
3084 if (is_mc_recip)
3085 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3086
3087 /* Find any existing filters with the same match tuple or
3088 * else a free slot to insert at. If any of them are busy,
3089 * we have to wait and retry.
3090 */
3091 for (;;) {
3092 unsigned int depth = 1;
3093 unsigned int i;
3094
3095 spin_lock_bh(&efx->filter_lock);
3096
3097 for (;;) {
3098 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3099 saved_spec = efx_ef10_filter_entry_spec(table, i);
3100
3101 if (!saved_spec) {
3102 if (ins_index < 0)
3103 ins_index = i;
3104 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3105 if (table->entry[i].spec &
3106 EFX_EF10_FILTER_FLAG_BUSY)
3107 break;
3108 if (spec->priority < saved_spec->priority &&
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003109 spec->priority != EFX_FILTER_PRI_AUTO) {
Ben Hutchings8127d662013-08-29 19:19:29 +01003110 rc = -EPERM;
3111 goto out_unlock;
3112 }
3113 if (!is_mc_recip) {
3114 /* This is the only one */
3115 if (spec->priority ==
3116 saved_spec->priority &&
3117 !replace_equal) {
3118 rc = -EEXIST;
3119 goto out_unlock;
3120 }
3121 ins_index = i;
3122 goto found;
3123 } else if (spec->priority >
3124 saved_spec->priority ||
3125 (spec->priority ==
3126 saved_spec->priority &&
3127 replace_equal)) {
3128 if (ins_index < 0)
3129 ins_index = i;
3130 else
3131 __set_bit(depth, mc_rem_map);
3132 }
3133 }
3134
3135 /* Once we reach the maximum search depth, use
3136 * the first suitable slot or return -EBUSY if
3137 * there was none
3138 */
3139 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3140 if (ins_index < 0) {
3141 rc = -EBUSY;
3142 goto out_unlock;
3143 }
3144 goto found;
3145 }
3146
3147 ++depth;
3148 }
3149
3150 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3151 spin_unlock_bh(&efx->filter_lock);
3152 schedule();
3153 }
3154
3155found:
3156 /* Create a software table entry if necessary, and mark it
3157 * busy. We might yet fail to insert, but any attempt to
3158 * insert a conflicting filter while we're waiting for the
3159 * firmware must find the busy entry.
3160 */
3161 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3162 if (saved_spec) {
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003163 if (spec->priority == EFX_FILTER_PRI_AUTO &&
3164 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
Ben Hutchings8127d662013-08-29 19:19:29 +01003165 /* Just make sure it won't be removed */
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003166 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
3167 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01003168 table->entry[ins_index].spec &=
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00003169 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
Ben Hutchings8127d662013-08-29 19:19:29 +01003170 rc = ins_index;
3171 goto out_unlock;
3172 }
3173 replacing = true;
3174 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
3175 } else {
3176 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3177 if (!saved_spec) {
3178 rc = -ENOMEM;
3179 goto out_unlock;
3180 }
3181 *saved_spec = *spec;
3182 priv_flags = 0;
3183 }
3184 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3185 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
3186
3187 /* Mark lower-priority multicast recipients busy prior to removal */
3188 if (is_mc_recip) {
3189 unsigned int depth, i;
3190
3191 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3192 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3193 if (test_bit(depth, mc_rem_map))
3194 table->entry[i].spec |=
3195 EFX_EF10_FILTER_FLAG_BUSY;
3196 }
3197 }
3198
3199 spin_unlock_bh(&efx->filter_lock);
3200
3201 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
3202 replacing);
3203
3204 /* Finalise the software table entry */
3205 spin_lock_bh(&efx->filter_lock);
3206 if (rc == 0) {
3207 if (replacing) {
3208 /* Update the fields that may differ */
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003209 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
3210 saved_spec->flags |=
3211 EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01003212 saved_spec->priority = spec->priority;
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003213 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01003214 saved_spec->flags |= spec->flags;
3215 saved_spec->rss_context = spec->rss_context;
3216 saved_spec->dmaq_id = spec->dmaq_id;
3217 }
3218 } else if (!replacing) {
3219 kfree(saved_spec);
3220 saved_spec = NULL;
3221 }
3222 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
3223
3224 /* Remove and finalise entries for lower-priority multicast
3225 * recipients
3226 */
3227 if (is_mc_recip) {
3228 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3229 unsigned int depth, i;
3230
3231 memset(inbuf, 0, sizeof(inbuf));
3232
3233 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3234 if (!test_bit(depth, mc_rem_map))
3235 continue;
3236
3237 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3238 saved_spec = efx_ef10_filter_entry_spec(table, i);
3239 priv_flags = efx_ef10_filter_entry_flags(table, i);
3240
3241 if (rc == 0) {
3242 spin_unlock_bh(&efx->filter_lock);
3243 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3244 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3245 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3246 table->entry[i].handle);
3247 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3248 inbuf, sizeof(inbuf),
3249 NULL, 0, NULL);
3250 spin_lock_bh(&efx->filter_lock);
3251 }
3252
3253 if (rc == 0) {
3254 kfree(saved_spec);
3255 saved_spec = NULL;
3256 priv_flags = 0;
3257 } else {
3258 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
3259 }
3260 efx_ef10_filter_set_entry(table, i, saved_spec,
3261 priv_flags);
3262 }
3263 }
3264
3265 /* If successful, return the inserted filter ID */
3266 if (rc == 0)
3267 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
3268
3269 wake_up_all(&table->waitq);
3270out_unlock:
3271 spin_unlock_bh(&efx->filter_lock);
3272 finish_wait(&table->waitq, &wait);
3273 return rc;
3274}
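/* The insertion above runs in three phases: probe the software table
 * (linear probing from hash+1 for up to EFX_EF10_FILTER_SEARCH_LIMIT
 * slots, sleeping and retrying whenever a matching entry is BUSY),
 * reserve the chosen slot by marking it BUSY so filter_lock can be
 * dropped, then push to firmware and finalise under the lock again.
 * For multicast recipients, the lower-priority duplicates recorded in
 * mc_rem_map are unsubscribed only once the new filter has been
 * accepted by the firmware.
 */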
3275
Fengguang Wu9fd8095d2013-08-31 06:54:05 +08003276static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01003277{
3278 /* no need to do anything here on EF10 */
3279}
3280
3281/* Remove a filter.
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00003282 * If !by_index, filter_id is a filter ID: the match priority encoded
 3283 * in it is cross-checked against the stored spec before removal.
 3284 * If by_index, filter_id is used as a bare table index.
Ben Hutchings8127d662013-08-29 19:19:29 +01003284 * Filter ID may come from userland and must be range-checked.
3285 */
3286static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
Ben Hutchingsfbd79122013-11-21 19:15:03 +00003287 unsigned int priority_mask,
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00003288 u32 filter_id, bool by_index)
Ben Hutchings8127d662013-08-29 19:19:29 +01003289{
3290 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3291 struct efx_ef10_filter_table *table = efx->filter_state;
3292 MCDI_DECLARE_BUF(inbuf,
3293 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3294 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3295 struct efx_filter_spec *spec;
3296 DEFINE_WAIT(wait);
3297 int rc;
3298
3299 /* Find the software table entry and mark it busy. Don't
3300 * remove it yet; any attempt to update while we're waiting
3301 * for the firmware must find the busy entry.
3302 */
3303 for (;;) {
3304 spin_lock_bh(&efx->filter_lock);
3305 if (!(table->entry[filter_idx].spec &
3306 EFX_EF10_FILTER_FLAG_BUSY))
3307 break;
3308 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3309 spin_unlock_bh(&efx->filter_lock);
3310 schedule();
3311 }
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003312
Ben Hutchings8127d662013-08-29 19:19:29 +01003313 spec = efx_ef10_filter_entry_spec(table, filter_idx);
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003314 if (!spec ||
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00003315 (!by_index &&
Ben Hutchings8127d662013-08-29 19:19:29 +01003316 efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
3317 filter_id / HUNT_FILTER_TBL_ROWS)) {
3318 rc = -ENOENT;
3319 goto out_unlock;
3320 }
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003321
3322 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
Ben Hutchingsfbd79122013-11-21 19:15:03 +00003323 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003324 /* Just remove flags */
3325 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00003326 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003327 rc = 0;
3328 goto out_unlock;
3329 }
3330
Ben Hutchingsfbd79122013-11-21 19:15:03 +00003331 if (!(priority_mask & (1U << spec->priority))) {
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003332 rc = -ENOENT;
3333 goto out_unlock;
3334 }
3335
Ben Hutchings8127d662013-08-29 19:19:29 +01003336 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3337 spin_unlock_bh(&efx->filter_lock);
3338
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003339 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00003340 /* Reset to an automatic filter */
Ben Hutchings8127d662013-08-29 19:19:29 +01003341
3342 struct efx_filter_spec new_spec = *spec;
3343
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003344 new_spec.priority = EFX_FILTER_PRI_AUTO;
Ben Hutchings8127d662013-08-29 19:19:29 +01003345 new_spec.flags = (EFX_FILTER_FLAG_RX |
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00003346 (efx_rss_enabled(efx) ?
3347 EFX_FILTER_FLAG_RX_RSS : 0));
Ben Hutchings8127d662013-08-29 19:19:29 +01003348 new_spec.dmaq_id = 0;
3349 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3350 rc = efx_ef10_filter_push(efx, &new_spec,
3351 &table->entry[filter_idx].handle,
3352 true);
3353
3354 spin_lock_bh(&efx->filter_lock);
3355 if (rc == 0)
3356 *spec = new_spec;
3357 } else {
3358 /* Really remove the filter */
3359
3360 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3361 efx_ef10_filter_is_exclusive(spec) ?
3362 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3363 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3364 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3365 table->entry[filter_idx].handle);
3366 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3367 inbuf, sizeof(inbuf), NULL, 0, NULL);
3368
3369 spin_lock_bh(&efx->filter_lock);
3370 if (rc == 0) {
3371 kfree(spec);
3372 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3373 }
3374 }
Ben Hutchings7665d1a2013-11-21 19:02:18 +00003375
Ben Hutchings8127d662013-08-29 19:19:29 +01003376 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3377 wake_up_all(&table->waitq);
3378out_unlock:
3379 spin_unlock_bh(&efx->filter_lock);
3380 finish_wait(&table->waitq, &wait);
3381 return rc;
3382}
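/* Note that "removing" a filter carrying RX_OVER_AUTO above does not
 * delete it from hardware: it is pushed again as a REPLACE with
 * priority AUTO, the default RX queue and default RSS settings, i.e.
 * downgraded back to the automatic filter it originally overrode.
 */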
3383
3384static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
3385 enum efx_filter_priority priority,
3386 u32 filter_id)
3387{
Ben Hutchingsfbd79122013-11-21 19:15:03 +00003388 return efx_ef10_filter_remove_internal(efx, 1U << priority,
3389 filter_id, false);
Ben Hutchings8127d662013-08-29 19:19:29 +01003390}
3391
Edward Cree12fb0da2015-07-21 15:11:00 +01003392static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
3393{
3394 return filter_id % HUNT_FILTER_TBL_ROWS;
3395}
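/* "Unsafe" filter IDs are bare table rows: the match-priority part of
 * the ID is discarded here, so these helpers are only suitable for IDs
 * the driver generated itself (the address-list filters below), never
 * for IDs supplied by userland.
 */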
3396
3397static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
3398 enum efx_filter_priority priority,
3399 u32 filter_id)
3400{
3401 return efx_ef10_filter_remove_internal(efx, 1U << priority,
3402 filter_id, true);
3403}
3404
Ben Hutchings8127d662013-08-29 19:19:29 +01003405static int efx_ef10_filter_get_safe(struct efx_nic *efx,
3406 enum efx_filter_priority priority,
3407 u32 filter_id, struct efx_filter_spec *spec)
3408{
3409 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3410 struct efx_ef10_filter_table *table = efx->filter_state;
3411 const struct efx_filter_spec *saved_spec;
3412 int rc;
3413
3414 spin_lock_bh(&efx->filter_lock);
3415 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
3416 if (saved_spec && saved_spec->priority == priority &&
3417 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
3418 filter_id / HUNT_FILTER_TBL_ROWS) {
3419 *spec = *saved_spec;
3420 rc = 0;
3421 } else {
3422 rc = -ENOENT;
3423 }
3424 spin_unlock_bh(&efx->filter_lock);
3425 return rc;
3426}
3427
Ben Hutchingsfbd79122013-11-21 19:15:03 +00003428static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
Ben Hutchings8127d662013-08-29 19:19:29 +01003429 enum efx_filter_priority priority)
3430{
Ben Hutchingsfbd79122013-11-21 19:15:03 +00003431 unsigned int priority_mask;
3432 unsigned int i;
3433 int rc;
3434
3435 priority_mask = (((1U << (priority + 1)) - 1) &
3436 ~(1U << EFX_FILTER_PRI_AUTO));
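	/* i.e. clear every filter at or below the requested priority
	 * except the AUTO filters backing the device address lists,
	 * which are managed by the mark-and-sweep in
	 * efx_ef10_filter_sync_rx_mode().
	 */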
3437
3438 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
3439 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
3440 i, true);
3441 if (rc && rc != -ENOENT)
3442 return rc;
3443 }
3444
3445 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +01003446}
3447
3448static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
3449 enum efx_filter_priority priority)
3450{
3451 struct efx_ef10_filter_table *table = efx->filter_state;
3452 unsigned int filter_idx;
3453 s32 count = 0;
3454
3455 spin_lock_bh(&efx->filter_lock);
3456 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3457 if (table->entry[filter_idx].spec &&
3458 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
3459 priority)
3460 ++count;
3461 }
3462 spin_unlock_bh(&efx->filter_lock);
3463 return count;
3464}
3465
3466static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
3467{
3468 struct efx_ef10_filter_table *table = efx->filter_state;
3469
3470 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
3471}
3472
3473static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
3474 enum efx_filter_priority priority,
3475 u32 *buf, u32 size)
3476{
3477 struct efx_ef10_filter_table *table = efx->filter_state;
3478 struct efx_filter_spec *spec;
3479 unsigned int filter_idx;
3480 s32 count = 0;
3481
3482 spin_lock_bh(&efx->filter_lock);
3483 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3484 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3485 if (spec && spec->priority == priority) {
3486 if (count == size) {
3487 count = -EMSGSIZE;
3488 break;
3489 }
3490 buf[count++] = (efx_ef10_filter_rx_match_pri(
3491 table, spec->match_flags) *
3492 HUNT_FILTER_TBL_ROWS +
3493 filter_idx);
3494 }
3495 }
3496 spin_unlock_bh(&efx->filter_lock);
3497 return count;
3498}
3499
3500#ifdef CONFIG_RFS_ACCEL
3501
3502static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
3503
3504static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
3505 struct efx_filter_spec *spec)
3506{
3507 struct efx_ef10_filter_table *table = efx->filter_state;
3508 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3509 struct efx_filter_spec *saved_spec;
3510 unsigned int hash, i, depth = 1;
3511 bool replacing = false;
3512 int ins_index = -1;
3513 u64 cookie;
3514 s32 rc;
3515
3516 /* Must be an RX filter without RSS and not for a multicast
3517 * destination address (RFS only works for connected sockets).
3518 * These restrictions allow us to pass only a tiny amount of
3519 * data through to the completion function.
3520 */
3521 EFX_WARN_ON_PARANOID(spec->flags !=
3522 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
3523 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
3524 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
3525
3526 hash = efx_ef10_filter_hash(spec);
3527
3528 spin_lock_bh(&efx->filter_lock);
3529
3530 /* Find any existing filter with the same match tuple or else
3531 * a free slot to insert at. If an existing filter is busy,
3532 * we have to give up.
3533 */
3534 for (;;) {
3535 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3536 saved_spec = efx_ef10_filter_entry_spec(table, i);
3537
3538 if (!saved_spec) {
3539 if (ins_index < 0)
3540 ins_index = i;
3541 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3542 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
3543 rc = -EBUSY;
3544 goto fail_unlock;
3545 }
Ben Hutchings8127d662013-08-29 19:19:29 +01003546 if (spec->priority < saved_spec->priority) {
3547 rc = -EPERM;
3548 goto fail_unlock;
3549 }
3550 ins_index = i;
3551 break;
3552 }
3553
3554 /* Once we reach the maximum search depth, use the
3555 * first suitable slot or return -EBUSY if there was
3556 * none
3557 */
3558 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3559 if (ins_index < 0) {
3560 rc = -EBUSY;
3561 goto fail_unlock;
3562 }
3563 break;
3564 }
3565
3566 ++depth;
3567 }
3568
3569 /* Create a software table entry if necessary, and mark it
3570 * busy. We might yet fail to insert, but any attempt to
3571 * insert a conflicting filter while we're waiting for the
3572 * firmware must find the busy entry.
3573 */
3574 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3575 if (saved_spec) {
3576 replacing = true;
3577 } else {
3578 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3579 if (!saved_spec) {
3580 rc = -ENOMEM;
3581 goto fail_unlock;
3582 }
3583 *saved_spec = *spec;
3584 }
3585 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3586 EFX_EF10_FILTER_FLAG_BUSY);
3587
3588 spin_unlock_bh(&efx->filter_lock);
3589
3590 /* Pack up the variables needed on completion */
3591 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
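	/* Cookie layout, unpacked by efx_ef10_filter_rfs_insert_complete():
	 *	bit  31     replacing flag
	 *	bits 30:16  software table index
	 *	bits 15:0   RX queue (dmaq_id)
	 */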
3592
3593 efx_ef10_filter_push_prep(efx, spec, inbuf,
3594 table->entry[ins_index].handle, replacing);
3595 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3596 MC_CMD_FILTER_OP_OUT_LEN,
3597 efx_ef10_filter_rfs_insert_complete, cookie);
3598
3599 return ins_index;
3600
3601fail_unlock:
3602 spin_unlock_bh(&efx->filter_lock);
3603 return rc;
3604}
3605
3606static void
3607efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
3608 int rc, efx_dword_t *outbuf,
3609 size_t outlen_actual)
3610{
3611 struct efx_ef10_filter_table *table = efx->filter_state;
3612 unsigned int ins_index, dmaq_id;
3613 struct efx_filter_spec *spec;
3614 bool replacing;
3615
3616 /* Unpack the cookie */
3617 replacing = cookie >> 31;
3618 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
3619 dmaq_id = cookie & 0xffff;
3620
3621 spin_lock_bh(&efx->filter_lock);
3622 spec = efx_ef10_filter_entry_spec(table, ins_index);
3623 if (rc == 0) {
3624 table->entry[ins_index].handle =
3625 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
3626 if (replacing)
3627 spec->dmaq_id = dmaq_id;
3628 } else if (!replacing) {
3629 kfree(spec);
3630 spec = NULL;
3631 }
3632 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
3633 spin_unlock_bh(&efx->filter_lock);
3634
3635 wake_up_all(&table->waitq);
3636}
3637
3638static void
3639efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3640 unsigned long filter_idx,
3641 int rc, efx_dword_t *outbuf,
3642 size_t outlen_actual);
3643
3644static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
3645 unsigned int filter_idx)
3646{
3647 struct efx_ef10_filter_table *table = efx->filter_state;
3648 struct efx_filter_spec *spec =
3649 efx_ef10_filter_entry_spec(table, filter_idx);
3650 MCDI_DECLARE_BUF(inbuf,
3651 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3652 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3653
3654 if (!spec ||
3655 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
3656 spec->priority != EFX_FILTER_PRI_HINT ||
3657 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
3658 flow_id, filter_idx))
3659 return false;
3660
3661 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3662 MC_CMD_FILTER_OP_IN_OP_REMOVE);
3663 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3664 table->entry[filter_idx].handle);
3665 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
3666 efx_ef10_filter_rfs_expire_complete, filter_idx))
3667 return false;
3668
3669 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3670 return true;
3671}
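/* The entry marked BUSY above stays busy until the asynchronous MCDI
 * completion below runs; on success the completion frees the spec and
 * clears the slot, otherwise it merely drops the BUSY flag.
 */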
3672
3673static void
3674efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3675 unsigned long filter_idx,
3676 int rc, efx_dword_t *outbuf,
3677 size_t outlen_actual)
3678{
3679 struct efx_ef10_filter_table *table = efx->filter_state;
3680 struct efx_filter_spec *spec =
3681 efx_ef10_filter_entry_spec(table, filter_idx);
3682
3683 spin_lock_bh(&efx->filter_lock);
3684 if (rc == 0) {
3685 kfree(spec);
3686 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3687 }
3688 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3689 wake_up_all(&table->waitq);
3690 spin_unlock_bh(&efx->filter_lock);
3691}
3692
3693#endif /* CONFIG_RFS_ACCEL */
3694
3695static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
3696{
3697 int match_flags = 0;
3698
3699#define MAP_FLAG(gen_flag, mcdi_field) { \
3700 u32 old_mcdi_flags = mcdi_flags; \
3701 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
3702 mcdi_field ## _LBN); \
3703 if (mcdi_flags != old_mcdi_flags) \
3704 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
3705 }
3706 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
3707 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
3708 MAP_FLAG(REM_HOST, SRC_IP);
3709 MAP_FLAG(LOC_HOST, DST_IP);
3710 MAP_FLAG(REM_MAC, SRC_MAC);
3711 MAP_FLAG(REM_PORT, SRC_PORT);
3712 MAP_FLAG(LOC_MAC, DST_MAC);
3713 MAP_FLAG(LOC_PORT, DST_PORT);
3714 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
3715 MAP_FLAG(INNER_VID, INNER_VLAN);
3716 MAP_FLAG(OUTER_VID, OUTER_VLAN);
3717 MAP_FLAG(IP_PROTO, IP_PROTO);
3718#undef MAP_FLAG
3719
3720 /* Did we map them all? */
3721 if (mcdi_flags)
3722 return -EINVAL;
3723
3724 return match_flags;
3725}
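/* MAP_FLAG clears each MCDI match bit it recognises, so any bits still
 * set afterwards indicate a match type this driver does not understand
 * and the whole combination is reported as unsupported (the probe
 * function below then skips that priority level).
 */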
3726
3727static int efx_ef10_filter_table_probe(struct efx_nic *efx)
3728{
3729 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
3730 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
3731 unsigned int pd_match_pri, pd_match_count;
3732 struct efx_ef10_filter_table *table;
3733 size_t outlen;
3734 int rc;
3735
3736 table = kzalloc(sizeof(*table), GFP_KERNEL);
3737 if (!table)
3738 return -ENOMEM;
3739
3740 /* Find out which RX filter types are supported, and their priorities */
3741 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
3742 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
3743 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
3744 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
3745 &outlen);
3746 if (rc)
3747 goto fail;
3748 pd_match_count = MCDI_VAR_ARRAY_LEN(
3749 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
3750 table->rx_match_count = 0;
3751
3752 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
3753 u32 mcdi_flags =
3754 MCDI_ARRAY_DWORD(
3755 outbuf,
3756 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
3757 pd_match_pri);
3758 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
3759 if (rc < 0) {
3760 netif_dbg(efx, probe, efx->net_dev,
3761 "%s: fw flags %#x pri %u not supported in driver\n",
3762 __func__, mcdi_flags, pd_match_pri);
3763 } else {
3764 netif_dbg(efx, probe, efx->net_dev,
3765 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
3766 __func__, mcdi_flags, pd_match_pri,
3767 rc, table->rx_match_count);
3768 table->rx_match_flags[table->rx_match_count++] = rc;
3769 }
3770 }
3771
3772 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
3773 if (!table->entry) {
3774 rc = -ENOMEM;
3775 goto fail;
3776 }
3777
Edward Cree12fb0da2015-07-21 15:11:00 +01003778 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
3779 table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
3780 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
3781
Ben Hutchings8127d662013-08-29 19:19:29 +01003782 efx->filter_state = table;
3783 init_waitqueue_head(&table->waitq);
3784 return 0;
3785
3786fail:
3787 kfree(table);
3788 return rc;
3789}
3790
Edward Cree0d322412015-05-20 11:10:03 +01003791/* Caller must hold efx->filter_sem for read if race against
3792 * efx_ef10_filter_table_remove() is possible
3793 */
Ben Hutchings8127d662013-08-29 19:19:29 +01003794static void efx_ef10_filter_table_restore(struct efx_nic *efx)
3795{
3796 struct efx_ef10_filter_table *table = efx->filter_state;
3797 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3798 struct efx_filter_spec *spec;
3799 unsigned int filter_idx;
3800 bool failed = false;
3801 int rc;
3802
Edward Cree0d322412015-05-20 11:10:03 +01003803 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
3804
Ben Hutchings8127d662013-08-29 19:19:29 +01003805 if (!nic_data->must_restore_filters)
3806 return;
3807
Edward Cree0d322412015-05-20 11:10:03 +01003808 if (!table)
3809 return;
3810
Ben Hutchings8127d662013-08-29 19:19:29 +01003811 spin_lock_bh(&efx->filter_lock);
3812
3813 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3814 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3815 if (!spec)
3816 continue;
3817
3818 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3819 spin_unlock_bh(&efx->filter_lock);
3820
3821 rc = efx_ef10_filter_push(efx, spec,
3822 &table->entry[filter_idx].handle,
3823 false);
3824 if (rc)
3825 failed = true;
3826
3827 spin_lock_bh(&efx->filter_lock);
3828 if (rc) {
3829 kfree(spec);
3830 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3831 } else {
3832 table->entry[filter_idx].spec &=
3833 ~EFX_EF10_FILTER_FLAG_BUSY;
3834 }
3835 }
3836
3837 spin_unlock_bh(&efx->filter_lock);
3838
3839 if (failed)
3840 netif_err(efx, hw, efx->net_dev,
3841 "unable to restore all filters\n");
3842 else
3843 nic_data->must_restore_filters = false;
3844}
3845
Edward Cree0d322412015-05-20 11:10:03 +01003846/* Caller must hold efx->filter_sem for write */
Ben Hutchings8127d662013-08-29 19:19:29 +01003847static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3848{
3849 struct efx_ef10_filter_table *table = efx->filter_state;
3850 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3851 struct efx_filter_spec *spec;
3852 unsigned int filter_idx;
3853 int rc;
3854
Edward Cree0d322412015-05-20 11:10:03 +01003855 efx->filter_state = NULL;
3856 if (!table)
3857 return;
3858
Ben Hutchings8127d662013-08-29 19:19:29 +01003859 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3860 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3861 if (!spec)
3862 continue;
3863
3864 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3865 efx_ef10_filter_is_exclusive(spec) ?
3866 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3867 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3868 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3869 table->entry[filter_idx].handle);
Bert Kenwarde65a5102015-12-23 08:57:36 +00003870 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
3871 sizeof(inbuf), NULL, 0, NULL);
Ben Hutchings48ce5632013-11-01 16:42:44 +00003872 if (rc)
Bert Kenwarde65a5102015-12-23 08:57:36 +00003873 netif_info(efx, drv, efx->net_dev,
3874 "%s: filter %04x remove failed\n",
3875 __func__, filter_idx);
Ben Hutchings8127d662013-08-29 19:19:29 +01003876 kfree(spec);
3877 }
3878
3879 vfree(table->entry);
3880 kfree(table);
3881}
3882
Edward Cree12fb0da2015-07-21 15:11:00 +01003883#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
Bert Kenwarde65a5102015-12-23 08:57:36 +00003884 if (id != EFX_EF10_FILTER_ID_INVALID) { \
3885 filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
3886 if (!table->entry[filter_idx].spec) \
3887 netif_dbg(efx, drv, efx->net_dev, \
3888 "%s: marked null spec old %04x:%04x\n", \
3889 __func__, id, filter_idx); \
3890 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\
3891 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003892static void efx_ef10_filter_mark_old(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01003893{
3894 struct efx_ef10_filter_table *table = efx->filter_state;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003895 unsigned int filter_idx, i;
Ben Hutchings8127d662013-08-29 19:19:29 +01003896
Edward Cree0d322412015-05-20 11:10:03 +01003897 if (!table)
3898 return;
3899
Ben Hutchings8127d662013-08-29 19:19:29 +01003900 /* Mark old filters that may need to be removed */
3901 spin_lock_bh(&efx->filter_lock);
Edward Cree12fb0da2015-07-21 15:11:00 +01003902 for (i = 0; i < table->dev_uc_count; i++)
3903 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
3904 for (i = 0; i < table->dev_mc_count; i++)
3905 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
3906 EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
3907 EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
3908 EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
Ben Hutchings8127d662013-08-29 19:19:29 +01003909 spin_unlock_bh(&efx->filter_lock);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003910}
Edward Cree12fb0da2015-07-21 15:11:00 +01003911#undef EFX_EF10_FILTER_DO_MARK_OLD
Ben Hutchings8127d662013-08-29 19:19:29 +01003912
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003913static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
3914{
3915 struct efx_ef10_filter_table *table = efx->filter_state;
3916 struct net_device *net_dev = efx->net_dev;
3917 struct netdev_hw_addr *uc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003919 unsigned int i;
3920
Edward Cree12fb0da2015-07-21 15:11:00 +01003921 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
 3923 if (net_dev->flags & IFF_PROMISC)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003924 *promisc = true;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003926 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
3927 i = 1;
3928 netdev_for_each_uc_addr(uc, net_dev) {
Edward Cree12fb0da2015-07-21 15:11:00 +01003929 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
3930 *promisc = true;
3931 break;
3932 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003933 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
Edward Cree12fb0da2015-07-21 15:11:00 +01003934 table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003935 i++;
 3936 }
 3937 /* i counts the station address plus the list entries that fit,
 3938 * so dev_uc_count can never run past the array
 3939 */
 3940 table->dev_uc_count = i;
 3941}
3938
3939static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
3940{
3941 struct efx_ef10_filter_table *table = efx->filter_state;
3942 struct net_device *net_dev = efx->net_dev;
3943 struct netdev_hw_addr *mc;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01003944 unsigned int i;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003945
Edward Cree12fb0da2015-07-21 15:11:00 +01003946 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
3947 table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01003948 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003949 *promisc = true;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003950
3952 i = 0;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01003953 netdev_for_each_mc_addr(mc, net_dev) {
Edward Cree12fb0da2015-07-21 15:11:00 +01003954 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
3955 *promisc = true;
3956 break;
3957 }
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01003958 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
Edward Cree12fb0da2015-07-21 15:11:00 +01003959 table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01003960 i++;
Ben Hutchings8127d662013-08-29 19:19:29 +01003961 }
Edward Cree12fb0da2015-07-21 15:11:00 +01003962
3963 table->dev_mc_count = i;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003964}
Ben Hutchings8127d662013-08-29 19:19:29 +01003965
Edward Cree12fb0da2015-07-21 15:11:00 +01003966static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3967 bool multicast, bool rollback)
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003968{
3969 struct efx_ef10_filter_table *table = efx->filter_state;
3970 struct efx_ef10_dev_addr *addr_list;
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00003971 enum efx_filter_flags filter_flags;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003972 struct efx_filter_spec spec;
Edward Cree12fb0da2015-07-21 15:11:00 +01003973 u8 baddr[ETH_ALEN];
3974 unsigned int i, j;
3975 int addr_count;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003976 int rc;
3977
3978 if (multicast) {
3979 addr_list = table->dev_mc_list;
Edward Cree12fb0da2015-07-21 15:11:00 +01003980 addr_count = table->dev_mc_count;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003981 } else {
3982 addr_list = table->dev_uc_list;
Edward Cree12fb0da2015-07-21 15:11:00 +01003983 addr_count = table->dev_uc_count;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003984 }
3985
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00003986 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
3987
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003988 /* Insert/renew filters */
Edward Cree12fb0da2015-07-21 15:11:00 +01003989 for (i = 0; i < addr_count; i++) {
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00003990 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Jon Cooperb6f568e2015-07-21 15:10:15 +01003991 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
Daniel Pieczko822b96f2015-07-21 15:10:27 +01003992 addr_list[i].addr);
Jon Cooperb6f568e2015-07-21 15:10:15 +01003993 rc = efx_ef10_filter_insert(efx, &spec, true);
3994 if (rc < 0) {
Edward Cree12fb0da2015-07-21 15:11:00 +01003995 if (rollback) {
3996 netif_info(efx, drv, efx->net_dev,
3997 "efx_ef10_filter_insert failed rc=%d\n",
3998 rc);
 3999 /* Roll back the filters inserted so far; the
 4000 * caller will fall back to promiscuous mode
 4001 */
4000 for (j = 0; j < i; j++) {
4001 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
4002 continue;
4003 efx_ef10_filter_remove_unsafe(
4004 efx, EFX_FILTER_PRI_AUTO,
4005 addr_list[j].id);
4006 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
4007 }
4008 return rc;
 4009 } else {
 4010 /* mark as not inserted, and carry on; don't pass
 4011 * EFX_EF10_FILTER_ID_INVALID through
 4012 * efx_ef10_filter_get_unsafe_id(), which would fold
 4013 * it into a valid-looking table index
 4014 */
 4015 addr_list[i].id = EFX_EF10_FILTER_ID_INVALID;
 4016 continue;
 4017 }
 4018 }
 4019 addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
Ben Hutchings8127d662013-08-29 19:19:29 +01004015 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004016
Edward Cree12fb0da2015-07-21 15:11:00 +01004017 if (multicast && rollback) {
4018 /* Also need an Ethernet broadcast filter */
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00004019 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Edward Cree12fb0da2015-07-21 15:11:00 +01004020 eth_broadcast_addr(baddr);
4021 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004022 rc = efx_ef10_filter_insert(efx, &spec, true);
Edward Cree12fb0da2015-07-21 15:11:00 +01004023 if (rc < 0) {
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004024 netif_warn(efx, drv, efx->net_dev,
Edward Cree12fb0da2015-07-21 15:11:00 +01004025 "Broadcast filter insert failed rc=%d\n", rc);
 4026 /* Roll back the address-list filters; the caller
 4027 * will fall back to promiscuous mode
 4028 */
4027 for (j = 0; j < i; j++) {
4028 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
4029 continue;
4030 efx_ef10_filter_remove_unsafe(
4031 efx, EFX_FILTER_PRI_AUTO,
4032 addr_list[j].id);
4033 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
4034 }
4035 return rc;
4036 } else {
4037 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
4038 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004039 }
Edward Cree12fb0da2015-07-21 15:11:00 +01004040
4041 return 0;
4042}
4043
4044static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4045 bool rollback)
4046{
4047 struct efx_ef10_filter_table *table = efx->filter_state;
4048 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00004049 enum efx_filter_flags filter_flags;
Edward Cree12fb0da2015-07-21 15:11:00 +01004050 struct efx_filter_spec spec;
4051 u8 baddr[ETH_ALEN];
4052 int rc;
4053
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00004054 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4055
4056 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
Edward Cree12fb0da2015-07-21 15:11:00 +01004057
4058 if (multicast)
4059 efx_filter_set_mc_def(&spec);
4060 else
4061 efx_filter_set_uc_def(&spec);
4062
4063 rc = efx_ef10_filter_insert(efx, &spec, true);
4064 if (rc < 0) {
Bert Kenward09a04202015-12-23 08:58:15 +00004065 netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
4066 efx->net_dev,
4067 "%scast mismatch filter insert failed rc=%d\n",
4068 multicast ? "Multi" : "Uni", rc);
Edward Cree12fb0da2015-07-21 15:11:00 +01004069 } else if (multicast) {
4070 table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
4071 if (!nic_data->workaround_26807) {
4072 /* Also need an Ethernet broadcast filter */
4073 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
Bert Kenwardf1c2ef42015-12-11 09:39:32 +00004074 filter_flags, 0);
Edward Cree12fb0da2015-07-21 15:11:00 +01004075 eth_broadcast_addr(baddr);
4076 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
4077 baddr);
4078 rc = efx_ef10_filter_insert(efx, &spec, true);
4079 if (rc < 0) {
4080 netif_warn(efx, drv, efx->net_dev,
4081 "Broadcast filter insert failed rc=%d\n",
4082 rc);
4083 if (rollback) {
4084 /* Roll back the mc_def filter */
4085 efx_ef10_filter_remove_unsafe(
4086 efx, EFX_FILTER_PRI_AUTO,
4087 table->mcdef_id);
4088 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
4089 return rc;
4090 }
4091 } else {
4092 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
4093 }
4094 }
4095 rc = 0;
4096 } else {
4097 table->ucdef_id = rc;
4098 rc = 0;
4099 }
4100 return rc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004101}
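/* A note on the broadcast handling above: with workaround 26807 the
 * firmware chains multicast filters and the multicast-mismatch filter
 * appears to cover broadcast frames too, so a separate broadcast
 * filter is only inserted when the workaround is absent.
 */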
4102
4103/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
4104 * flag or removes these filters, we don't need to hold the filter_lock while
4105 * scanning for these filters.
4106 */
4107static void efx_ef10_filter_remove_old(struct efx_nic *efx)
4108{
4109 struct efx_ef10_filter_table *table = efx->filter_state;
Bert Kenwarde65a5102015-12-23 08:57:36 +00004110 int remove_failed = 0;
4111 int remove_noent = 0;
4112 int rc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004113 int i;
4114
Ben Hutchings8127d662013-08-29 19:19:29 +01004115 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4116 if (ACCESS_ONCE(table->entry[i].spec) &
Ben Hutchingsb59e6ef2013-11-21 19:02:22 +00004117 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
Bert Kenwarde65a5102015-12-23 08:57:36 +00004118 rc = efx_ef10_filter_remove_internal(efx,
4119 1U << EFX_FILTER_PRI_AUTO, i, true);
4120 if (rc == -ENOENT)
4121 remove_noent++;
4122 else if (rc)
4123 remove_failed++;
Ben Hutchings8127d662013-08-29 19:19:29 +01004124 }
4125 }
Bert Kenwarde65a5102015-12-23 08:57:36 +00004126
4127 if (remove_failed)
4128 netif_info(efx, drv, efx->net_dev,
4129 "%s: failed to remove %d filters\n",
4130 __func__, remove_failed);
4131 if (remove_noent)
4132 netif_info(efx, drv, efx->net_dev,
4133 "%s: failed to remove %d non-existent filters\n",
4134 __func__, remove_noent);
Ben Hutchings8127d662013-08-29 19:19:29 +01004135}
4136
Daniel Pieczko7a186f42015-07-07 11:37:19 +01004137static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
4138{
4139 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4140 u8 mac_old[ETH_ALEN];
4141 int rc, rc2;
4142
4143 /* Only reconfigure a PF-created vport */
4144 if (is_zero_ether_addr(nic_data->vport_mac))
4145 return 0;
4146
4147 efx_device_detach_sync(efx);
4148 efx_net_stop(efx->net_dev);
4149 down_write(&efx->filter_sem);
4150 efx_ef10_filter_table_remove(efx);
4151 up_write(&efx->filter_sem);
4152
4153 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
4154 if (rc)
4155 goto restore_filters;
4156
4157 ether_addr_copy(mac_old, nic_data->vport_mac);
4158 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
4159 nic_data->vport_mac);
4160 if (rc)
4161 goto restore_vadaptor;
4162
4163 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
4164 efx->net_dev->dev_addr);
4165 if (!rc) {
4166 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
4167 } else {
4168 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
4169 if (rc2) {
4170 /* Failed to add original MAC, so clear vport_mac */
4171 eth_zero_addr(nic_data->vport_mac);
4172 goto reset_nic;
4173 }
4174 }
4175
4176restore_vadaptor:
4177 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
4178 if (rc2)
4179 goto reset_nic;
4180restore_filters:
4181 down_write(&efx->filter_sem);
4182 rc2 = efx_ef10_filter_table_probe(efx);
4183 up_write(&efx->filter_sem);
4184 if (rc2)
4185 goto reset_nic;
4186
4187 rc2 = efx_net_open(efx->net_dev);
4188 if (rc2)
4189 goto reset_nic;
4190
4191 netif_device_attach(efx->net_dev);
4192
4193 return rc;
4194
4195reset_nic:
4196 netif_err(efx, drv, efx->net_dev,
4197 "Failed to restore when changing MAC address - scheduling reset\n");
4198 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
4199
4200 return rc ? rc : rc2;
4201}
4202
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004203/* Caller must hold efx->filter_sem for read if race against
4204 * efx_ef10_filter_table_remove() is possible
4205 */
4206static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
4207{
4208 struct efx_ef10_filter_table *table = efx->filter_state;
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01004209 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004210 struct net_device *net_dev = efx->net_dev;
4211 bool uc_promisc = false, mc_promisc = false;
4212
4213 if (!efx_dev_registered(efx))
4214 return;
4215
4216 if (!table)
4217 return;
4218
4219 efx_ef10_filter_mark_old(efx);
4220
4221 /* Copy/convert the address lists; add the primary station
4222 * address and broadcast address
4223 */
4224 netif_addr_lock_bh(net_dev);
4225 efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
4226 efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
4227 netif_addr_unlock_bh(net_dev);
4228
Edward Cree12fb0da2015-07-21 15:11:00 +01004229 /* Insert/renew unicast filters */
4230 if (uc_promisc) {
4231 efx_ef10_filter_insert_def(efx, false, false);
4232 efx_ef10_filter_insert_addr_list(efx, false, false);
4233 } else {
4234 /* If any of the filters failed to insert, fall back to
4235 * promiscuous mode - add in the uc_def filter. But keep
4236 * our individual unicast filters.
4237 */
4238 if (efx_ef10_filter_insert_addr_list(efx, false, false))
4239 efx_ef10_filter_insert_def(efx, false, false);
4240 }
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01004241
Edward Cree12fb0da2015-07-21 15:11:00 +01004242 /* Insert/renew multicast filters */
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01004243 /* If changing promiscuous state with cascaded multicast filters, remove
4244 * old filters first, so that packets are dropped rather than duplicated
4245 */
4246 if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
4247 efx_ef10_filter_remove_old(efx);
Edward Cree12fb0da2015-07-21 15:11:00 +01004248 if (mc_promisc) {
4249 if (nic_data->workaround_26807) {
4250 /* If we failed to insert promiscuous filters, rollback
4251 * and fall back to individual multicast filters
4252 */
4253 if (efx_ef10_filter_insert_def(efx, true, true)) {
4254 /* Changing promisc state, so remove old filters */
4255 efx_ef10_filter_remove_old(efx);
4256 efx_ef10_filter_insert_addr_list(efx, true, false);
4257 }
4258 } else {
4259 /* If we failed to insert promiscuous filters, don't
4260 * rollback. Regardless, also insert the mc_list
4261 */
4262 efx_ef10_filter_insert_def(efx, true, false);
4263 efx_ef10_filter_insert_addr_list(efx, true, false);
4264 }
4265 } else {
4266 /* If any filters failed to insert, rollback and fall back to
4267 * promiscuous mode - mc_def filter and maybe broadcast. If
4268 * that fails, roll back again and insert as many of our
4269 * individual multicast filters as we can.
4270 */
4271 if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
4272 /* Changing promisc state, so remove old filters */
4273 if (nic_data->workaround_26807)
4274 efx_ef10_filter_remove_old(efx);
4275 if (efx_ef10_filter_insert_def(efx, true, true))
4276 efx_ef10_filter_insert_addr_list(efx, true, false);
4277 }
4278 }
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004279
4280 efx_ef10_filter_remove_old(efx);
Daniel Pieczkoab8b1f7c2015-07-21 15:10:44 +01004281 efx->mc_promisc = mc_promisc;
Daniel Pieczko822b96f2015-07-21 15:10:27 +01004282}
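/* In summary, the fallback strategy above is:
 * unicast: when promiscuous mode is wanted, uc_def plus the individual
 * list; otherwise the individual list alone, adding uc_def only if an
 * insertion fails.
 * multicast: when promiscuous mode is wanted, mc_def, plus the
 * individual list unless workaround 26807 is active (in which case a
 * failed mc_def is rolled back to the individual list instead);
 * otherwise the individual list plus broadcast, falling back first to
 * mc_def and then to as many individual filters as possible.
 */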
4283
Shradha Shah910c8782015-05-20 11:12:48 +01004284static int efx_ef10_set_mac_address(struct efx_nic *efx)
4285{
4286 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
4287 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4288 bool was_enabled = efx->port_enabled;
4289 int rc;
4290
4291 efx_device_detach_sync(efx);
4292 efx_net_stop(efx->net_dev);
4293 down_write(&efx->filter_sem);
4294 efx_ef10_filter_table_remove(efx);
4295
4296 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
4297 efx->net_dev->dev_addr);
4298 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
4299 nic_data->vport_id);
Daniel Pieczko535a6172015-07-07 11:37:33 +01004300 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
4301 sizeof(inbuf), NULL, 0, NULL);
Shradha Shah910c8782015-05-20 11:12:48 +01004302
4303 efx_ef10_filter_table_probe(efx);
4304 up_write(&efx->filter_sem);
4305 if (was_enabled)
4306 efx_net_open(efx->net_dev);
4307 netif_device_attach(efx->net_dev);
4308
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004309#ifdef CONFIG_SFC_SRIOV
4310 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
Shradha Shah910c8782015-05-20 11:12:48 +01004311 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
4312
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004313 if (rc == -EPERM) {
4314 struct efx_nic *efx_pf;
Shradha Shah910c8782015-05-20 11:12:48 +01004315
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004316 /* Switch to PF and change MAC address on vport */
4317 efx_pf = pci_get_drvdata(pci_dev_pf);
4318
4319 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
Shradha Shah910c8782015-05-20 11:12:48 +01004320 nic_data->vf_index,
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004321 efx->net_dev->dev_addr);
4322 } else if (!rc) {
Shradha Shah910c8782015-05-20 11:12:48 +01004323 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
4324 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
4325 unsigned int i;
4326
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004327 /* MAC address successfully changed by VF (with MAC
4328 * spoofing) so update the parent PF if possible.
4329 */
Shradha Shah910c8782015-05-20 11:12:48 +01004330 for (i = 0; i < efx_pf->vf_count; ++i) {
4331 struct ef10_vf *vf = nic_data->vf + i;
4332
4333 if (vf->efx == efx) {
4334 ether_addr_copy(vf->mac,
4335 efx->net_dev->dev_addr);
4336 return 0;
4337 }
4338 }
4339 }
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004340 } else
Shradha Shah910c8782015-05-20 11:12:48 +01004341#endif
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004342 if (rc == -EPERM) {
4343 netif_err(efx, drv, efx->net_dev,
4344 "Cannot change MAC address; use sfboot to enable"
4345 " mac-spoofing on this interface\n");
Daniel Pieczko7a186f42015-07-07 11:37:19 +01004346 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
4347 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
4348 * fall-back to the method of changing the MAC address on the
4349 * vport. This only applies to PFs because such versions of
4350 * MCFW do not support VFs.
4351 */
4352 rc = efx_ef10_vport_set_mac_address(efx);
Daniel Pieczko535a6172015-07-07 11:37:33 +01004353 } else {
4354 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
4355 sizeof(inbuf), NULL, 0, rc);
Daniel Pieczko9e9f6652015-07-07 11:37:00 +01004356 }
4357
Shradha Shah910c8782015-05-20 11:12:48 +01004358 return rc;
4359}
4360
Ben Hutchings8127d662013-08-29 19:19:29 +01004361static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
4362{
4363 efx_ef10_filter_sync_rx_mode(efx);
4364
4365 return efx_mcdi_set_mac(efx);
4366}
4367
Shradha Shah862f8942015-05-20 11:08:56 +01004368static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
4369{
4370 efx_ef10_filter_sync_rx_mode(efx);
4371
4372 return 0;
4373}
4374
Jon Cooper74cd60a2013-09-16 14:18:51 +01004375static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
4376{
4377 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
4378
4379 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
4380 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
4381 NULL, 0, NULL);
4382}
4383
4384/* MC BISTs follow a different poll mechanism to phy BISTs.
4385 * The BIST is done in the poll handler on the MC, and the MCDI command
4386 * will block until the BIST is done.
4387 */
4388static int efx_ef10_poll_bist(struct efx_nic *efx)
4389{
4390 int rc;
4391 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
4392 size_t outlen;
4393 u32 result;
4394
4395 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
4396 outbuf, sizeof(outbuf), &outlen);
4397 if (rc != 0)
4398 return rc;
4399
4400 if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
4401 return -EIO;
4402
4403 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
4404 switch (result) {
4405 case MC_CMD_POLL_BIST_PASSED:
4406 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
4407 return 0;
4408 case MC_CMD_POLL_BIST_TIMEOUT:
4409 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
4410 return -EIO;
4411 case MC_CMD_POLL_BIST_FAILED:
4412 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
4413 return -EIO;
4414 default:
4415 netif_err(efx, hw, efx->net_dev,
4416 "BIST returned unknown result %u", result);
4417 return -EIO;
4418 }
4419}
4420
4421static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
4422{
4423 int rc;
4424
4425 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
4426
4427 rc = efx_ef10_start_bist(efx, bist_type);
4428 if (rc != 0)
4429 return rc;
4430
4431 return efx_ef10_poll_bist(efx);
4432}
4433
4434static int
4435efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
4436{
4437 int rc, rc2;
4438
4439 efx_reset_down(efx, RESET_TYPE_WORLD);
4440
4441 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
4442 NULL, 0, NULL, 0, NULL);
4443 if (rc != 0)
4444 goto out;
4445
4446 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
4447 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
4448
4449 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
4450
4451out:
Daniel Pieczko27324822015-07-31 11:14:54 +01004452 if (rc == -EPERM)
4453 rc = 0;
Jon Cooper74cd60a2013-09-16 14:18:51 +01004454 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
4455 return rc ? rc : rc2;
4456}
4457
Ben Hutchings8127d662013-08-29 19:19:29 +01004458#ifdef CONFIG_SFC_MTD
4459
4460struct efx_ef10_nvram_type_info {
4461 u16 type, type_mask;
4462 u8 port;
4463 const char *name;
4464};
4465
4466static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
4467 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
4468 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
4469 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
4470 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
4471 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
4472 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
4473 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
4474 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
4475 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
Ben Hutchingsa84f3bf92013-10-09 14:14:41 +01004476 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
Ben Hutchings8127d662013-08-29 19:19:29 +01004477 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
4478};
4479
4480static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
4481 struct efx_mcdi_mtd_partition *part,
4482 unsigned int type)
4483{
4484 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
4485 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
4486 const struct efx_ef10_nvram_type_info *info;
4487 size_t size, erase_size, outlen;
4488 bool protected;
4489 int rc;
4490
4491 for (info = efx_ef10_nvram_types; ; info++) {
4492 if (info ==
4493 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
4494 return -ENODEV;
4495 if ((type & ~info->type_mask) == info->type)
4496 break;
4497 }
4498 if (info->port != efx_port_num(efx))
4499 return -ENODEV;
4500
4501 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
4502 if (rc)
4503 return rc;
4504 if (protected)
4505 return -ENODEV; /* hide it */
4506
4507 part->nvram_type = type;
4508
4509 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
4510 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
4511 outbuf, sizeof(outbuf), &outlen);
4512 if (rc)
4513 return rc;
4514 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
4515 return -EIO;
4516 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
4517 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
4518 part->fw_subtype = MCDI_DWORD(outbuf,
4519 NVRAM_METADATA_OUT_SUBTYPE);
4520
4521 part->common.dev_type_name = "EF10 NVRAM manager";
4522 part->common.type_name = info->name;
4523
4524 part->common.mtd.type = MTD_NORFLASH;
4525 part->common.mtd.flags = MTD_CAP_NORFLASH;
4526 part->common.mtd.size = size;
4527 part->common.mtd.erasesize = erase_size;
4528
4529 return 0;
4530}
4531
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	struct efx_mcdi_mtd_partition *parts;
	size_t outlen, n_parts_total, i, n_parts;
	unsigned int type;
	int rc;

	ASSERT_RTNL();

	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
		return -EIO;

	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	if (n_parts_total >
	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
		return -EIO;

	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	n_parts = 0;
	for (i = 0; i < n_parts_total; i++) {
		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
					i);
		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
		if (rc == 0)
			n_parts++;
		else if (rc != -ENODEV)
			goto fail;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */

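/* Tell the MC the current host clock for PTP synchronisation by
 * writing it to the MC doorbell low-dword register.  A VF has no
 * access to this register, so its hook is a no-op.
 */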
static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}

static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
					    u32 host_time) {}

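/* Subscribe a channel's event queue to PTP time sync events.  With
 * @temp set the change is treated as temporary: a channel that was
 * deliberately disabled is left alone, and a failed subscription
 * leaves the channel quiescent (to be retried later) rather than
 * disabled.
 */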
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
					   bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
	    channel->sync_events_state == SYNC_EVENTS_VALID ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
		return 0;
	channel->sync_events_state = SYNC_EVENTS_REQUESTED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	if (rc != 0)
		channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
						    SYNC_EVENTS_DISABLED;

	return rc;
}

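/* Unsubscribe a channel's event queue from PTP time sync events.  A
 * quiescent channel only needs its state updating, since the MC
 * subscription was already dropped when the channel was quiesced.
 */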
static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
					    bool temp)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
	int rc;

	if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
	    (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
		return 0;
	if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
		channel->sync_events_state = SYNC_EVENTS_DISABLED;
		return 0;
	}
	channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
					    SYNC_EVENTS_DISABLED;

	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
		       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
		       channel->channel);

	rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
			  inbuf, sizeof(inbuf), NULL, 0, NULL);

	return rc;
}

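/* Enable or disable PTP time sync events on all channels.  If enabling
 * fails on any channel, roll back by disabling the events everywhere.
 */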
static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
					   bool temp)
{
	int (*set)(struct efx_channel *channel, bool temp);
	struct efx_channel *channel;

	set = en ?
	      efx_ef10_rx_enable_timestamping :
	      efx_ef10_rx_disable_timestamping;

	efx_for_each_channel(channel, efx) {
		int rc = set(channel, temp);
		if (en && rc != 0) {
			efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
			return rc;
		}
	}

	return 0;
}

static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
					 struct hwtstamp_config *init)
{
	return -EOPNOTSUPP;
}

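/* Implementation of the hwtstamp (SIOCSHWTSTAMP) configuration for the
 * PF.  EF10 RX timestamping is all-or-nothing, so every specific PTP
 * filter is promoted to HWTSTAMP_FILTER_ALL; VFs cannot change the
 * configuration at all and return -EOPNOTSUPP above.
 */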
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
				      struct hwtstamp_config *init)
{
	int rc;

	switch (init->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		efx_ef10_ptp_set_ts_sync_events(efx, false, false);
		/* if TX timestamping is still requested then leave PTP on */
		return efx_ptp_change_mode(efx,
					   init->tx_type != HWTSTAMP_TX_OFF, 0);
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		init->rx_filter = HWTSTAMP_FILTER_ALL;
		rc = efx_ptp_change_mode(efx, true, 0);
		if (!rc)
			rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
		if (rc)
			efx_ptp_change_mode(efx, false, 0);
		return rc;
	default:
		return -ERANGE;
	}
}

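/* Offload features common to the PF and VF variants of this NIC type:
 * IPv4/IPv6 TX checksum offload, RX hashing and ntuple filtering.
 */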
#define EF10_OFFLOAD_FEATURES		\
	(NETIF_F_IP_CSUM |		\
	 NETIF_F_IPV6_CSUM |		\
	 NETIF_F_RXHASH |		\
	 NETIF_F_NTUPLE)

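/* NIC-type operations for a Huntington A0 virtual function.  Hooks
 * that would require privileged MCDI operations (MAC statistics, WoL,
 * MTD access, writing the host time) use VF-safe variants or dummy
 * ops; compare the PF table below.
 */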
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
	.is_vf = true,
	.mem_bar = EFX_MEM_VF_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_vf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_vf,
	.start_stats = efx_port_dummy_op_void,
	.pull_stats = efx_port_dummy_op_void,
	.stop_stats = efx_port_dummy_op_void,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol_vf,
	.set_wol = efx_ef10_set_wol_vf,
	.resume_wol = efx_port_dummy_op_void,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_port_dummy_op_int,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
#ifdef CONFIG_SFC_SRIOV
	.vswitching_probe = efx_ef10_vswitching_probe_vf,
	.vswitching_restore = efx_ef10_vswitching_restore_vf,
	.vswitching_remove = efx_ef10_vswitching_remove_vf,
	.sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
#endif
	.get_mac_address = efx_ef10_get_mac_address_vf,
	.set_mac_address = efx_ef10_set_mac_address,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
};

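/* NIC-type operations for a Huntington A0 physical function.  This is
 * the full-featured table: relative to the VF table above it adds the
 * chip and NVRAM self-tests, real MAC statistics, MTD access to the
 * NVRAM partitions and the SR-IOV management hooks.
 */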
const struct efx_nic_type efx_hunt_a0_nic_type = {
	.is_vf = false,
	.mem_bar = EFX_MEM_BAR,
	.mem_map_size = efx_ef10_mem_map_size,
	.probe = efx_ef10_probe_pf,
	.remove = efx_ef10_remove,
	.dimension_resources = efx_ef10_dimension_resources,
	.init = efx_ef10_init_nic,
	.fini = efx_port_dummy_op_void,
	.map_reset_reason = efx_ef10_map_reset_reason,
	.map_reset_flags = efx_ef10_map_reset_flags,
	.reset = efx_ef10_reset,
	.probe_port = efx_mcdi_port_probe,
	.remove_port = efx_mcdi_port_remove,
	.fini_dmaq = efx_ef10_fini_dmaq,
	.prepare_flr = efx_ef10_prepare_flr,
	.finish_flr = efx_port_dummy_op_void,
	.describe_stats = efx_ef10_describe_stats,
	.update_stats = efx_ef10_update_stats_pf,
	.start_stats = efx_mcdi_mac_start_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = efx_ef10_push_irq_moderation,
	.reconfigure_mac = efx_ef10_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.get_wol = efx_ef10_get_wol,
	.set_wol = efx_ef10_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = efx_ef10_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,
	.mcdi_request = efx_ef10_mcdi_request,
	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
	.mcdi_read_response = efx_ef10_mcdi_read_response,
	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef10_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.irq_handle_msi = efx_ef10_msi_interrupt,
	.irq_handle_legacy = efx_ef10_legacy_interrupt,
	.tx_probe = efx_ef10_tx_probe,
	.tx_init = efx_ef10_tx_init,
	.tx_remove = efx_ef10_tx_remove,
	.tx_write = efx_ef10_tx_write,
	.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
	.rx_probe = efx_ef10_rx_probe,
	.rx_init = efx_ef10_rx_init,
	.rx_remove = efx_ef10_rx_remove,
	.rx_write = efx_ef10_rx_write,
	.rx_defer_refill = efx_ef10_rx_defer_refill,
	.ev_probe = efx_ef10_ev_probe,
	.ev_init = efx_ef10_ev_init,
	.ev_fini = efx_ef10_ev_fini,
	.ev_remove = efx_ef10_ev_remove,
	.ev_process = efx_ef10_ev_process,
	.ev_read_ack = efx_ef10_ev_read_ack,
	.ev_test_generate = efx_ef10_ev_test_generate,
	.filter_table_probe = efx_ef10_filter_table_probe,
	.filter_table_restore = efx_ef10_filter_table_restore,
	.filter_table_remove = efx_ef10_filter_table_remove,
	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
	.filter_insert = efx_ef10_filter_insert,
	.filter_remove_safe = efx_ef10_filter_remove_safe,
	.filter_get_safe = efx_ef10_filter_get_safe,
	.filter_clear_rx = efx_ef10_filter_clear_rx,
	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
	.mtd_probe = efx_ef10_mtd_probe,
	.mtd_rename = efx_mcdi_mtd_rename,
	.mtd_read = efx_mcdi_mtd_read,
	.mtd_erase = efx_mcdi_mtd_erase,
	.mtd_write = efx_mcdi_mtd_write,
	.mtd_sync = efx_mcdi_mtd_sync,
#endif
	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
	.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
	.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_ef10_sriov_configure,
	.sriov_init = efx_ef10_sriov_init,
	.sriov_fini = efx_ef10_sriov_fini,
	.sriov_wanted = efx_ef10_sriov_wanted,
	.sriov_reset = efx_ef10_sriov_reset,
	.sriov_flr = efx_ef10_sriov_flr,
	.sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
	.sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
	.sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
	.sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
	.sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
	.vswitching_probe = efx_ef10_vswitching_probe_pf,
	.vswitching_restore = efx_ef10_vswitching_restore_pf,
	.vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
	.get_mac_address = efx_ef10_get_mac_address_pf,
	.set_mac_address = efx_ef10_set_mac_address,

	.revision = EFX_REV_HUNT_A0,
	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
	.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
	.can_rx_scatter = true,
	.always_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
	.offload_features = EF10_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
	.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
			    1 << HWTSTAMP_FILTER_ALL,
};