blob: 5d46d155b6423470084e606b8d082ac80db5a9b0 [file] [log] [blame]
Ben Hutchings8127d662013-08-29 19:19:29 +01001/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "ef10_regs.h"
12#include "io.h"
13#include "mcdi.h"
14#include "mcdi_pcol.h"
15#include "nic.h"
16#include "workarounds.h"
Jon Cooper74cd60a2013-09-16 14:18:51 +010017#include "selftest.h"
Ben Hutchings8127d662013-08-29 19:19:29 +010018#include <linux/in.h>
19#include <linux/jhash.h>
20#include <linux/wait.h>
21#include <linux/workqueue.h>
22
23/* Hardware control for EF10 architecture including 'Huntington'. */
24
25#define EFX_EF10_DRVGEN_EV 7
26enum {
27 EFX_EF10_TEST = 1,
28 EFX_EF10_REFILL,
29};
30
31/* The reserved RSS context value */
32#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
33
34/* The filter table(s) are managed by firmware and we have write-only
35 * access. When removing filters we must identify them to the
36 * firmware by a 64-bit handle, but this is too wide for Linux kernel
37 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
38 * be able to tell in advance whether a requested insertion will
39 * replace an existing filter. Therefore we maintain a software hash
40 * table, which should be at least as large as the hardware hash
41 * table.
42 *
43 * Huntington has a single 8K filter table shared between all filter
44 * types and both ports.
45 */
46#define HUNT_FILTER_TBL_ROWS 8192
47
48struct efx_ef10_filter_table {
49/* The RX match field masks supported by this fw & hw, in order of priority */
50 enum efx_filter_match_flags rx_match_flags[
51 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
52 unsigned int rx_match_count;
53
54 struct {
55 unsigned long spec; /* pointer to spec plus flag bits */
56/* BUSY flag indicates that an update is in progress. STACK_OLD is
57 * used to mark and sweep stack-owned MAC filters.
58 */
59#define EFX_EF10_FILTER_FLAG_BUSY 1UL
60#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
61#define EFX_EF10_FILTER_FLAGS 3UL
62 u64 handle; /* firmware handle */
63 } *entry;
64 wait_queue_head_t waitq;
65/* Shadow of net_device address lists, guarded by mac_lock */
66#define EFX_EF10_FILTER_STACK_UC_MAX 32
67#define EFX_EF10_FILTER_STACK_MC_MAX 256
68 struct {
69 u8 addr[ETH_ALEN];
70 u16 id;
71 } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
72 stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
73 int stack_uc_count; /* negative for PROMISC */
74 int stack_mc_count; /* negative for PROMISC/ALLMULTI */
75};
76
77/* An arbitrary search limit for the software hash table */
78#define EFX_EF10_FILTER_SEARCH_LIMIT 200
79
80static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
81static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
82static void efx_ef10_filter_table_remove(struct efx_nic *efx);
83
84static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
85{
86 efx_dword_t reg;
87
88 efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
89 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
90 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
91}
92
93static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
94{
95 return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
96}
97
Ben Hutchingse5a25382013-09-05 22:50:59 +010098static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +010099{
100 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
101 struct efx_ef10_nic_data *nic_data = efx->nic_data;
102 size_t outlen;
103 int rc;
104
105 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
106
107 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
108 outbuf, sizeof(outbuf), &outlen);
109 if (rc)
110 return rc;
Ben Hutchingse5a25382013-09-05 22:50:59 +0100111 if (outlen < sizeof(outbuf)) {
112 netif_err(efx, drv, efx->net_dev,
113 "unable to read datapath firmware capabilities\n");
114 return -EIO;
115 }
Ben Hutchings8127d662013-08-29 19:19:29 +0100116
Ben Hutchingse5a25382013-09-05 22:50:59 +0100117 nic_data->datapath_caps =
118 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
119
120 if (!(nic_data->datapath_caps &
121 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
122 netif_err(efx, drv, efx->net_dev,
123 "current firmware does not support TSO\n");
124 return -ENODEV;
125 }
126
127 if (!(nic_data->datapath_caps &
128 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
129 netif_err(efx, probe, efx->net_dev,
130 "current firmware does not support an RX prefix\n");
131 return -ENODEV;
Ben Hutchings8127d662013-08-29 19:19:29 +0100132 }
133
134 return 0;
135}
136
137static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
138{
139 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
140 int rc;
141
142 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
143 outbuf, sizeof(outbuf), NULL);
144 if (rc)
145 return rc;
146 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
147 return rc > 0 ? rc : -ERANGE;
148}
149
150static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
151{
152 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
153 size_t outlen;
154 int rc;
155
156 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
157
158 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
159 outbuf, sizeof(outbuf), &outlen);
160 if (rc)
161 return rc;
162 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
163 return -EIO;
164
165 memcpy(mac_address,
166 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
167 return 0;
168}
169
170static int efx_ef10_probe(struct efx_nic *efx)
171{
172 struct efx_ef10_nic_data *nic_data;
173 int i, rc;
174
175 /* We can have one VI for each 8K region. However we need
176 * multiple TX queues per channel.
177 */
178 efx->max_channels =
179 min_t(unsigned int,
180 EFX_MAX_CHANNELS,
181 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
182 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
183 BUG_ON(efx->max_channels == 0);
184
185 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
186 if (!nic_data)
187 return -ENOMEM;
188 efx->nic_data = nic_data;
189
190 rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
191 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
192 if (rc)
193 goto fail1;
194
195 /* Get the MC's warm boot count. In case it's rebooting right
196 * now, be prepared to retry.
197 */
198 i = 0;
199 for (;;) {
200 rc = efx_ef10_get_warm_boot_count(efx);
201 if (rc >= 0)
202 break;
203 if (++i == 5)
204 goto fail2;
205 ssleep(1);
206 }
207 nic_data->warm_boot_count = rc;
208
209 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
210
211 /* In case we're recovering from a crash (kexec), we want to
212 * cancel any outstanding request by the previous user of this
213 * function. We send a special message using the least
214 * significant bits of the 'high' (doorbell) register.
215 */
216 _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
217
218 rc = efx_mcdi_init(efx);
219 if (rc)
220 goto fail2;
221
222 /* Reset (most) configuration for this function */
223 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
224 if (rc)
225 goto fail3;
226
227 /* Enable event logging */
228 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
229 if (rc)
230 goto fail3;
231
Ben Hutchingse5a25382013-09-05 22:50:59 +0100232 rc = efx_ef10_init_datapath_caps(efx);
Ben Hutchings8127d662013-08-29 19:19:29 +0100233 if (rc < 0)
234 goto fail3;
235
236 efx->rx_packet_len_offset =
237 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
238
Ben Hutchings8127d662013-08-29 19:19:29 +0100239 rc = efx_mcdi_port_get_number(efx);
240 if (rc < 0)
241 goto fail3;
242 efx->port_num = rc;
243
244 rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
245 if (rc)
246 goto fail3;
247
248 rc = efx_ef10_get_sysclk_freq(efx);
249 if (rc < 0)
250 goto fail3;
251 efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
252
253 /* Check whether firmware supports bug 35388 workaround */
254 rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
255 if (rc == 0)
256 nic_data->workaround_35388 = true;
257 else if (rc != -ENOSYS && rc != -ENOENT)
258 goto fail3;
259 netif_dbg(efx, probe, efx->net_dev,
260 "workaround for bug 35388 is %sabled\n",
261 nic_data->workaround_35388 ? "en" : "dis");
262
263 rc = efx_mcdi_mon_probe(efx);
264 if (rc)
265 goto fail3;
266
Ben Hutchings8127d662013-08-29 19:19:29 +0100267 return 0;
268
269fail3:
270 efx_mcdi_fini(efx);
271fail2:
272 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
273fail1:
274 kfree(nic_data);
275 efx->nic_data = NULL;
276 return rc;
277}
278
279static int efx_ef10_free_vis(struct efx_nic *efx)
280{
281 int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
282
283 /* -EALREADY means nothing to free, so ignore */
284 if (rc == -EALREADY)
285 rc = 0;
286 return rc;
287}
288
Ben Hutchings183233b2013-06-28 21:47:12 +0100289#ifdef EFX_USE_PIO
290
291static void efx_ef10_free_piobufs(struct efx_nic *efx)
292{
293 struct efx_ef10_nic_data *nic_data = efx->nic_data;
294 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
295 unsigned int i;
296 int rc;
297
298 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
299
300 for (i = 0; i < nic_data->n_piobufs; i++) {
301 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
302 nic_data->piobuf_handle[i]);
303 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
304 NULL, 0, NULL);
305 WARN_ON(rc);
306 }
307
308 nic_data->n_piobufs = 0;
309}
310
311static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
312{
313 struct efx_ef10_nic_data *nic_data = efx->nic_data;
314 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
315 unsigned int i;
316 size_t outlen;
317 int rc = 0;
318
319 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
320
321 for (i = 0; i < n; i++) {
322 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
323 outbuf, sizeof(outbuf), &outlen);
324 if (rc)
325 break;
326 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
327 rc = -EIO;
328 break;
329 }
330 nic_data->piobuf_handle[i] =
331 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
332 netif_dbg(efx, probe, efx->net_dev,
333 "allocated PIO buffer %u handle %x\n", i,
334 nic_data->piobuf_handle[i]);
335 }
336
337 nic_data->n_piobufs = i;
338 if (rc)
339 efx_ef10_free_piobufs(efx);
340 return rc;
341}
342
343static int efx_ef10_link_piobufs(struct efx_nic *efx)
344{
345 struct efx_ef10_nic_data *nic_data = efx->nic_data;
346 MCDI_DECLARE_BUF(inbuf,
347 max(MC_CMD_LINK_PIOBUF_IN_LEN,
348 MC_CMD_UNLINK_PIOBUF_IN_LEN));
349 struct efx_channel *channel;
350 struct efx_tx_queue *tx_queue;
351 unsigned int offset, index;
352 int rc;
353
354 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
355 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
356
357 /* Link a buffer to each VI in the write-combining mapping */
358 for (index = 0; index < nic_data->n_piobufs; ++index) {
359 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
360 nic_data->piobuf_handle[index]);
361 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
362 nic_data->pio_write_vi_base + index);
363 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
364 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
365 NULL, 0, NULL);
366 if (rc) {
367 netif_err(efx, drv, efx->net_dev,
368 "failed to link VI %u to PIO buffer %u (%d)\n",
369 nic_data->pio_write_vi_base + index, index,
370 rc);
371 goto fail;
372 }
373 netif_dbg(efx, probe, efx->net_dev,
374 "linked VI %u to PIO buffer %u\n",
375 nic_data->pio_write_vi_base + index, index);
376 }
377
378 /* Link a buffer to each TX queue */
379 efx_for_each_channel(channel, efx) {
380 efx_for_each_channel_tx_queue(tx_queue, channel) {
381 /* We assign the PIO buffers to queues in
382 * reverse order to allow for the following
383 * special case.
384 */
385 offset = ((efx->tx_channel_offset + efx->n_tx_channels -
386 tx_queue->channel->channel - 1) *
387 efx_piobuf_size);
388 index = offset / ER_DZ_TX_PIOBUF_SIZE;
389 offset = offset % ER_DZ_TX_PIOBUF_SIZE;
390
391 /* When the host page size is 4K, the first
392 * host page in the WC mapping may be within
393 * the same VI page as the last TX queue. We
394 * can only link one buffer to each VI.
395 */
396 if (tx_queue->queue == nic_data->pio_write_vi_base) {
397 BUG_ON(index != 0);
398 rc = 0;
399 } else {
400 MCDI_SET_DWORD(inbuf,
401 LINK_PIOBUF_IN_PIOBUF_HANDLE,
402 nic_data->piobuf_handle[index]);
403 MCDI_SET_DWORD(inbuf,
404 LINK_PIOBUF_IN_TXQ_INSTANCE,
405 tx_queue->queue);
406 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
407 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
408 NULL, 0, NULL);
409 }
410
411 if (rc) {
412 /* This is non-fatal; the TX path just
413 * won't use PIO for this queue
414 */
415 netif_err(efx, drv, efx->net_dev,
416 "failed to link VI %u to PIO buffer %u (%d)\n",
417 tx_queue->queue, index, rc);
418 tx_queue->piobuf = NULL;
419 } else {
420 tx_queue->piobuf =
421 nic_data->pio_write_base +
422 index * EFX_VI_PAGE_SIZE + offset;
423 tx_queue->piobuf_offset = offset;
424 netif_dbg(efx, probe, efx->net_dev,
425 "linked VI %u to PIO buffer %u offset %x addr %p\n",
426 tx_queue->queue, index,
427 tx_queue->piobuf_offset,
428 tx_queue->piobuf);
429 }
430 }
431 }
432
433 return 0;
434
435fail:
436 while (index--) {
437 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
438 nic_data->pio_write_vi_base + index);
439 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
440 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
441 NULL, 0, NULL);
442 }
443 return rc;
444}
445
446#else /* !EFX_USE_PIO */
447
448static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
449{
450 return n == 0 ? 0 : -ENOBUFS;
451}
452
453static int efx_ef10_link_piobufs(struct efx_nic *efx)
454{
455 return 0;
456}
457
458static void efx_ef10_free_piobufs(struct efx_nic *efx)
459{
460}
461
462#endif /* EFX_USE_PIO */
463
Ben Hutchings8127d662013-08-29 19:19:29 +0100464static void efx_ef10_remove(struct efx_nic *efx)
465{
466 struct efx_ef10_nic_data *nic_data = efx->nic_data;
467 int rc;
468
469 efx_mcdi_mon_remove(efx);
470
471 /* This needs to be after efx_ptp_remove_channel() with no filters */
472 efx_ef10_rx_free_indir_table(efx);
473
Ben Hutchings183233b2013-06-28 21:47:12 +0100474 if (nic_data->wc_membase)
475 iounmap(nic_data->wc_membase);
476
Ben Hutchings8127d662013-08-29 19:19:29 +0100477 rc = efx_ef10_free_vis(efx);
478 WARN_ON(rc != 0);
479
Ben Hutchings183233b2013-06-28 21:47:12 +0100480 if (!nic_data->must_restore_piobufs)
481 efx_ef10_free_piobufs(efx);
482
Ben Hutchings8127d662013-08-29 19:19:29 +0100483 efx_mcdi_fini(efx);
484 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
485 kfree(nic_data);
486}
487
488static int efx_ef10_alloc_vis(struct efx_nic *efx,
489 unsigned int min_vis, unsigned int max_vis)
490{
491 MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
492 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
493 struct efx_ef10_nic_data *nic_data = efx->nic_data;
494 size_t outlen;
495 int rc;
496
497 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
498 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
499 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
500 outbuf, sizeof(outbuf), &outlen);
501 if (rc != 0)
502 return rc;
503
504 if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
505 return -EIO;
506
507 netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
508 MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
509
510 nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
511 nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
512 return 0;
513}
514
Ben Hutchings183233b2013-06-28 21:47:12 +0100515/* Note that the failure path of this function does not free
516 * resources, as this will be done by efx_ef10_remove().
517 */
Ben Hutchings8127d662013-08-29 19:19:29 +0100518static int efx_ef10_dimension_resources(struct efx_nic *efx)
519{
Ben Hutchings183233b2013-06-28 21:47:12 +0100520 struct efx_ef10_nic_data *nic_data = efx->nic_data;
521 unsigned int uc_mem_map_size, wc_mem_map_size;
522 unsigned int min_vis, pio_write_vi_base, max_vis;
523 void __iomem *membase;
524 int rc;
Ben Hutchings8127d662013-08-29 19:19:29 +0100525
Ben Hutchings183233b2013-06-28 21:47:12 +0100526 min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
527
528#ifdef EFX_USE_PIO
529 /* Try to allocate PIO buffers if wanted and if the full
530 * number of PIO buffers would be sufficient to allocate one
531 * copy-buffer per TX channel. Failure is non-fatal, as there
532 * are only a small number of PIO buffers shared between all
533 * functions of the controller.
534 */
535 if (efx_piobuf_size != 0 &&
536 ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
537 efx->n_tx_channels) {
538 unsigned int n_piobufs =
539 DIV_ROUND_UP(efx->n_tx_channels,
540 ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
541
542 rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
543 if (rc)
544 netif_err(efx, probe, efx->net_dev,
545 "failed to allocate PIO buffers (%d)\n", rc);
546 else
547 netif_dbg(efx, probe, efx->net_dev,
548 "allocated %u PIO buffers\n", n_piobufs);
549 }
550#else
551 nic_data->n_piobufs = 0;
552#endif
553
554 /* PIO buffers should be mapped with write-combining enabled,
555 * and we want to make single UC and WC mappings rather than
556 * several of each (in fact that's the only option if host
557 * page size is >4K). So we may allocate some extra VIs just
558 * for writing PIO buffers through.
559 */
560 uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
561 ER_DZ_TX_PIOBUF);
562 if (nic_data->n_piobufs) {
563 pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
564 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
565 nic_data->n_piobufs) *
566 EFX_VI_PAGE_SIZE) -
567 uc_mem_map_size);
568 max_vis = pio_write_vi_base + nic_data->n_piobufs;
569 } else {
570 pio_write_vi_base = 0;
571 wc_mem_map_size = 0;
572 max_vis = min_vis;
573 }
574
575 /* In case the last attached driver failed to free VIs, do it now */
576 rc = efx_ef10_free_vis(efx);
577 if (rc != 0)
578 return rc;
579
580 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
581 if (rc != 0)
582 return rc;
583
584 /* If we didn't get enough VIs to map all the PIO buffers, free the
585 * PIO buffers
586 */
587 if (nic_data->n_piobufs &&
588 nic_data->n_allocated_vis <
589 pio_write_vi_base + nic_data->n_piobufs) {
590 netif_dbg(efx, probe, efx->net_dev,
591 "%u VIs are not sufficient to map %u PIO buffers\n",
592 nic_data->n_allocated_vis, nic_data->n_piobufs);
593 efx_ef10_free_piobufs(efx);
594 }
595
596 /* Shrink the original UC mapping of the memory BAR */
597 membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
598 if (!membase) {
599 netif_err(efx, probe, efx->net_dev,
600 "could not shrink memory BAR to %x\n",
601 uc_mem_map_size);
602 return -ENOMEM;
603 }
604 iounmap(efx->membase);
605 efx->membase = membase;
606
607 /* Set up the WC mapping if needed */
608 if (wc_mem_map_size) {
609 nic_data->wc_membase = ioremap_wc(efx->membase_phys +
610 uc_mem_map_size,
611 wc_mem_map_size);
612 if (!nic_data->wc_membase) {
613 netif_err(efx, probe, efx->net_dev,
614 "could not allocate WC mapping of size %x\n",
615 wc_mem_map_size);
616 return -ENOMEM;
617 }
618 nic_data->pio_write_vi_base = pio_write_vi_base;
619 nic_data->pio_write_base =
620 nic_data->wc_membase +
621 (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
622 uc_mem_map_size);
623
624 rc = efx_ef10_link_piobufs(efx);
625 if (rc)
626 efx_ef10_free_piobufs(efx);
627 }
628
629 netif_dbg(efx, probe, efx->net_dev,
630 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
631 &efx->membase_phys, efx->membase, uc_mem_map_size,
632 nic_data->wc_membase, wc_mem_map_size);
633
634 return 0;
Ben Hutchings8127d662013-08-29 19:19:29 +0100635}
636
637static int efx_ef10_init_nic(struct efx_nic *efx)
638{
639 struct efx_ef10_nic_data *nic_data = efx->nic_data;
640 int rc;
641
Ben Hutchingsa915ccc2013-09-05 22:51:55 +0100642 if (nic_data->must_check_datapath_caps) {
643 rc = efx_ef10_init_datapath_caps(efx);
644 if (rc)
645 return rc;
646 nic_data->must_check_datapath_caps = false;
647 }
648
Ben Hutchings8127d662013-08-29 19:19:29 +0100649 if (nic_data->must_realloc_vis) {
650 /* We cannot let the number of VIs change now */
651 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
652 nic_data->n_allocated_vis);
653 if (rc)
654 return rc;
655 nic_data->must_realloc_vis = false;
656 }
657
Ben Hutchings183233b2013-06-28 21:47:12 +0100658 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
659 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
660 if (rc == 0) {
661 rc = efx_ef10_link_piobufs(efx);
662 if (rc)
663 efx_ef10_free_piobufs(efx);
664 }
665
666 /* Log an error on failure, but this is non-fatal */
667 if (rc)
668 netif_err(efx, drv, efx->net_dev,
669 "failed to restore PIO buffers (%d)\n", rc);
670 nic_data->must_restore_piobufs = false;
671 }
672
Ben Hutchings8127d662013-08-29 19:19:29 +0100673 efx_ef10_rx_push_indir_table(efx);
674 return 0;
675}
676
677static int efx_ef10_map_reset_flags(u32 *flags)
678{
679 enum {
680 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
681 ETH_RESET_SHARED_SHIFT),
682 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
683 ETH_RESET_OFFLOAD | ETH_RESET_MAC |
684 ETH_RESET_PHY | ETH_RESET_MGMT) <<
685 ETH_RESET_SHARED_SHIFT)
686 };
687
688 /* We assume for now that our PCI function is permitted to
689 * reset everything.
690 */
691
692 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
693 *flags &= ~EF10_RESET_MC;
694 return RESET_TYPE_WORLD;
695 }
696
697 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
698 *flags &= ~EF10_RESET_PORT;
699 return RESET_TYPE_ALL;
700 }
701
702 /* no invisible reset implemented */
703
704 return -EINVAL;
705}
706
707#define EF10_DMA_STAT(ext_name, mcdi_name) \
708 [EF10_STAT_ ## ext_name] = \
709 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
710#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
711 [EF10_STAT_ ## int_name] = \
712 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
713#define EF10_OTHER_STAT(ext_name) \
714 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
715
716static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
717 EF10_DMA_STAT(tx_bytes, TX_BYTES),
718 EF10_DMA_STAT(tx_packets, TX_PKTS),
719 EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
720 EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
721 EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
722 EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
723 EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
724 EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
725 EF10_DMA_STAT(tx_64, TX_64_PKTS),
726 EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
727 EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
728 EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
729 EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
730 EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
731 EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
732 EF10_DMA_STAT(rx_bytes, RX_BYTES),
733 EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
734 EF10_OTHER_STAT(rx_good_bytes),
735 EF10_OTHER_STAT(rx_bad_bytes),
736 EF10_DMA_STAT(rx_packets, RX_PKTS),
737 EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
738 EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
739 EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
740 EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
741 EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
742 EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
743 EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
744 EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
745 EF10_DMA_STAT(rx_64, RX_64_PKTS),
746 EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
747 EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
748 EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
749 EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
750 EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
751 EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
752 EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
753 EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
754 EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
755 EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
756 EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
757 EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
Edward Cree568d7a02013-09-25 17:32:09 +0100758 EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
759 EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
760 EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
761 EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
762 EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
763 EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
764 EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
765 EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
766 EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
767 EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
768 EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
769 EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
Ben Hutchings8127d662013-08-29 19:19:29 +0100770};
771
772#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
773 (1ULL << EF10_STAT_tx_packets) | \
774 (1ULL << EF10_STAT_tx_pause) | \
775 (1ULL << EF10_STAT_tx_unicast) | \
776 (1ULL << EF10_STAT_tx_multicast) | \
777 (1ULL << EF10_STAT_tx_broadcast) | \
778 (1ULL << EF10_STAT_rx_bytes) | \
779 (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
780 (1ULL << EF10_STAT_rx_good_bytes) | \
781 (1ULL << EF10_STAT_rx_bad_bytes) | \
782 (1ULL << EF10_STAT_rx_packets) | \
783 (1ULL << EF10_STAT_rx_good) | \
784 (1ULL << EF10_STAT_rx_bad) | \
785 (1ULL << EF10_STAT_rx_pause) | \
786 (1ULL << EF10_STAT_rx_control) | \
787 (1ULL << EF10_STAT_rx_unicast) | \
788 (1ULL << EF10_STAT_rx_multicast) | \
789 (1ULL << EF10_STAT_rx_broadcast) | \
790 (1ULL << EF10_STAT_rx_lt64) | \
791 (1ULL << EF10_STAT_rx_64) | \
792 (1ULL << EF10_STAT_rx_65_to_127) | \
793 (1ULL << EF10_STAT_rx_128_to_255) | \
794 (1ULL << EF10_STAT_rx_256_to_511) | \
795 (1ULL << EF10_STAT_rx_512_to_1023) | \
796 (1ULL << EF10_STAT_rx_1024_to_15xx) | \
797 (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
798 (1ULL << EF10_STAT_rx_gtjumbo) | \
799 (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
800 (1ULL << EF10_STAT_rx_overflow) | \
801 (1ULL << EF10_STAT_rx_nodesc_drops))
802
803/* These statistics are only provided by the 10G MAC. For a 10G/40G
804 * switchable port we do not expose these because they might not
805 * include all the packets they should.
806 */
807#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
808 (1ULL << EF10_STAT_tx_lt64) | \
809 (1ULL << EF10_STAT_tx_64) | \
810 (1ULL << EF10_STAT_tx_65_to_127) | \
811 (1ULL << EF10_STAT_tx_128_to_255) | \
812 (1ULL << EF10_STAT_tx_256_to_511) | \
813 (1ULL << EF10_STAT_tx_512_to_1023) | \
814 (1ULL << EF10_STAT_tx_1024_to_15xx) | \
815 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
816
817/* These statistics are only provided by the 40G MAC. For a 10G/40G
818 * switchable port we do expose these because the errors will otherwise
819 * be silent.
820 */
821#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
822 (1ULL << EF10_STAT_rx_length_error))
823
Edward Cree568d7a02013-09-25 17:32:09 +0100824/* These statistics are only provided if the firmware supports the
825 * capability PM_AND_RXDP_COUNTERS.
826 */
827#define HUNT_PM_AND_RXDP_STAT_MASK ( \
828 (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
829 (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
830 (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
831 (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
832 (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
833 (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
834 (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
835 (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
836 (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
837 (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
838 (1ULL << EF10_STAT_rx_dp_emerg_fetch) | \
839 (1ULL << EF10_STAT_rx_dp_emerg_wait))
Ben Hutchings8127d662013-08-29 19:19:29 +0100840
Edward Cree4bae9132013-09-27 18:52:49 +0100841static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +0100842{
Edward Cree4bae9132013-09-27 18:52:49 +0100843 u64 raw_mask = HUNT_COMMON_STAT_MASK;
Ben Hutchings8127d662013-08-29 19:19:29 +0100844 u32 port_caps = efx_mcdi_phy_get_caps(efx);
Edward Cree568d7a02013-09-25 17:32:09 +0100845 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Ben Hutchings8127d662013-08-29 19:19:29 +0100846
847 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
Edward Cree4bae9132013-09-27 18:52:49 +0100848 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
Ben Hutchings8127d662013-08-29 19:19:29 +0100849 else
Edward Cree4bae9132013-09-27 18:52:49 +0100850 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
Edward Cree568d7a02013-09-25 17:32:09 +0100851
852 if (nic_data->datapath_caps &
853 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
854 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
855
Edward Cree4bae9132013-09-27 18:52:49 +0100856 return raw_mask;
857}
858
859static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
860{
861 u64 raw_mask = efx_ef10_raw_stat_mask(efx);
862
863#if BITS_PER_LONG == 64
864 mask[0] = raw_mask;
865#else
866 mask[0] = raw_mask & 0xffffffff;
867 mask[1] = raw_mask >> 32;
868#endif
Ben Hutchings8127d662013-08-29 19:19:29 +0100869}
870
871static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
872{
Edward Cree4bae9132013-09-27 18:52:49 +0100873 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
874
875 efx_ef10_get_stat_mask(efx, mask);
Ben Hutchings8127d662013-08-29 19:19:29 +0100876 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
Edward Cree4bae9132013-09-27 18:52:49 +0100877 mask, names);
Ben Hutchings8127d662013-08-29 19:19:29 +0100878}
879
880static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
881{
882 struct efx_ef10_nic_data *nic_data = efx->nic_data;
Edward Cree4bae9132013-09-27 18:52:49 +0100883 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
Ben Hutchings8127d662013-08-29 19:19:29 +0100884 __le64 generation_start, generation_end;
885 u64 *stats = nic_data->stats;
886 __le64 *dma_stats;
887
Edward Cree4bae9132013-09-27 18:52:49 +0100888 efx_ef10_get_stat_mask(efx, mask);
889
Ben Hutchings8127d662013-08-29 19:19:29 +0100890 dma_stats = efx->stats_buffer.addr;
891 nic_data = efx->nic_data;
892
893 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
894 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
895 return 0;
896 rmb();
Edward Cree4bae9132013-09-27 18:52:49 +0100897 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
Ben Hutchings8127d662013-08-29 19:19:29 +0100898 stats, efx->stats_buffer.addr, false);
Jon Cooperd546a892013-09-27 18:26:30 +0100899 rmb();
Ben Hutchings8127d662013-08-29 19:19:29 +0100900 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
901 if (generation_end != generation_start)
902 return -EAGAIN;
903
904 /* Update derived statistics */
905 stats[EF10_STAT_rx_good_bytes] =
906 stats[EF10_STAT_rx_bytes] -
907 stats[EF10_STAT_rx_bytes_minus_good_bytes];
908 efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
909 stats[EF10_STAT_rx_bytes_minus_good_bytes]);
910
911 return 0;
912}
913
914
915static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
916 struct rtnl_link_stats64 *core_stats)
917{
Edward Cree4bae9132013-09-27 18:52:49 +0100918 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
Ben Hutchings8127d662013-08-29 19:19:29 +0100919 struct efx_ef10_nic_data *nic_data = efx->nic_data;
920 u64 *stats = nic_data->stats;
921 size_t stats_count = 0, index;
922 int retry;
923
Edward Cree4bae9132013-09-27 18:52:49 +0100924 efx_ef10_get_stat_mask(efx, mask);
925
Ben Hutchings8127d662013-08-29 19:19:29 +0100926 /* If we're unlucky enough to read statistics during the DMA, wait
927 * up to 10ms for it to finish (typically takes <500us)
928 */
929 for (retry = 0; retry < 100; ++retry) {
930 if (efx_ef10_try_update_nic_stats(efx) == 0)
931 break;
932 udelay(100);
933 }
934
935 if (full_stats) {
936 for_each_set_bit(index, mask, EF10_STAT_COUNT) {
937 if (efx_ef10_stat_desc[index].name) {
938 *full_stats++ = stats[index];
939 ++stats_count;
940 }
941 }
942 }
943
944 if (core_stats) {
945 core_stats->rx_packets = stats[EF10_STAT_rx_packets];
946 core_stats->tx_packets = stats[EF10_STAT_tx_packets];
947 core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
948 core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
949 core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
950 core_stats->multicast = stats[EF10_STAT_rx_multicast];
951 core_stats->rx_length_errors =
952 stats[EF10_STAT_rx_gtjumbo] +
953 stats[EF10_STAT_rx_length_error];
954 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
955 core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
956 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
957 core_stats->rx_errors = (core_stats->rx_length_errors +
958 core_stats->rx_crc_errors +
959 core_stats->rx_frame_errors);
960 }
961
962 return stats_count;
963}
964
965static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
966{
967 struct efx_nic *efx = channel->efx;
968 unsigned int mode, value;
969 efx_dword_t timer_cmd;
970
971 if (channel->irq_moderation) {
972 mode = 3;
973 value = channel->irq_moderation - 1;
974 } else {
975 mode = 0;
976 value = 0;
977 }
978
979 if (EFX_EF10_WORKAROUND_35388(efx)) {
980 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
981 EFE_DD_EVQ_IND_TIMER_FLAGS,
982 ERF_DD_EVQ_IND_TIMER_MODE, mode,
983 ERF_DD_EVQ_IND_TIMER_VAL, value);
984 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
985 channel->channel);
986 } else {
987 EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
988 ERF_DZ_TC_TIMER_VAL, value);
989 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
990 channel->channel);
991 }
992}
993
994static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
995{
996 wol->supported = 0;
997 wol->wolopts = 0;
998 memset(&wol->sopass, 0, sizeof(wol->sopass));
999}
1000
1001static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
1002{
1003 if (type != 0)
1004 return -EINVAL;
1005 return 0;
1006}
1007
1008static void efx_ef10_mcdi_request(struct efx_nic *efx,
1009 const efx_dword_t *hdr, size_t hdr_len,
1010 const efx_dword_t *sdu, size_t sdu_len)
1011{
1012 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1013 u8 *pdu = nic_data->mcdi_buf.addr;
1014
1015 memcpy(pdu, hdr, hdr_len);
1016 memcpy(pdu + hdr_len, sdu, sdu_len);
1017 wmb();
1018
1019 /* The hardware provides 'low' and 'high' (doorbell) registers
1020 * for passing the 64-bit address of an MCDI request to
1021 * firmware. However the dwords are swapped by firmware. The
1022 * least significant bits of the doorbell are then 0 for all
1023 * MCDI requests due to alignment.
1024 */
1025 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
1026 ER_DZ_MC_DB_LWRD);
1027 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
1028 ER_DZ_MC_DB_HWRD);
1029}
1030
1031static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
1032{
1033 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1034 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
1035
1036 rmb();
1037 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
1038}
1039
1040static void
1041efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
1042 size_t offset, size_t outlen)
1043{
1044 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1045 const u8 *pdu = nic_data->mcdi_buf.addr;
1046
1047 memcpy(outbuf, pdu + offset, outlen);
1048}
1049
1050static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
1051{
1052 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1053 int rc;
1054
1055 rc = efx_ef10_get_warm_boot_count(efx);
1056 if (rc < 0) {
1057 /* The firmware is presumably in the process of
1058 * rebooting. However, we are supposed to report each
1059 * reboot just once, so we must only do that once we
1060 * can read and store the updated warm boot count.
1061 */
1062 return 0;
1063 }
1064
1065 if (rc == nic_data->warm_boot_count)
1066 return 0;
1067
1068 nic_data->warm_boot_count = rc;
1069
1070 /* All our allocations have been reset */
1071 nic_data->must_realloc_vis = true;
1072 nic_data->must_restore_filters = true;
Ben Hutchings183233b2013-06-28 21:47:12 +01001073 nic_data->must_restore_piobufs = true;
Ben Hutchings8127d662013-08-29 19:19:29 +01001074 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1075
Ben Hutchingsa915ccc2013-09-05 22:51:55 +01001076 /* The datapath firmware might have been changed */
1077 nic_data->must_check_datapath_caps = true;
1078
Ben Hutchings869070c2013-09-05 22:46:10 +01001079 /* MAC statistics have been cleared on the NIC; clear the local
1080 * statistic that we update with efx_update_diff_stat().
1081 */
1082 nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
1083
Ben Hutchings8127d662013-08-29 19:19:29 +01001084 return -EIO;
1085}
1086
1087/* Handle an MSI interrupt
1088 *
1089 * Handle an MSI hardware interrupt. This routine schedules event
1090 * queue processing. No interrupt acknowledgement cycle is necessary.
1091 * Also, we never need to check that the interrupt is for us, since
1092 * MSI interrupts cannot be shared.
1093 */
1094static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
1095{
1096 struct efx_msi_context *context = dev_id;
1097 struct efx_nic *efx = context->efx;
1098
1099 netif_vdbg(efx, intr, efx->net_dev,
1100 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
1101
1102 if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
1103 /* Note test interrupts */
1104 if (context->index == efx->irq_level)
1105 efx->last_irq_cpu = raw_smp_processor_id();
1106
1107 /* Schedule processing of the channel */
1108 efx_schedule_channel_irq(efx->channel[context->index]);
1109 }
1110
1111 return IRQ_HANDLED;
1112}
1113
1114static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
1115{
1116 struct efx_nic *efx = dev_id;
1117 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1118 struct efx_channel *channel;
1119 efx_dword_t reg;
1120 u32 queues;
1121
1122 /* Read the ISR which also ACKs the interrupts */
1123 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
1124 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
1125
1126 if (queues == 0)
1127 return IRQ_NONE;
1128
1129 if (likely(soft_enabled)) {
1130 /* Note test interrupts */
1131 if (queues & (1U << efx->irq_level))
1132 efx->last_irq_cpu = raw_smp_processor_id();
1133
1134 efx_for_each_channel(channel, efx) {
1135 if (queues & 1)
1136 efx_schedule_channel_irq(channel);
1137 queues >>= 1;
1138 }
1139 }
1140
1141 netif_vdbg(efx, intr, efx->net_dev,
1142 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1143 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1144
1145 return IRQ_HANDLED;
1146}
1147
1148static void efx_ef10_irq_test_generate(struct efx_nic *efx)
1149{
1150 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
1151
1152 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
1153
1154 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
1155 (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
1156 inbuf, sizeof(inbuf), NULL, 0, NULL);
1157}
1158
1159static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
1160{
1161 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
1162 (tx_queue->ptr_mask + 1) *
1163 sizeof(efx_qword_t),
1164 GFP_KERNEL);
1165}
1166
1167/* This writes to the TX_DESC_WPTR and also pushes data */
1168static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
1169 const efx_qword_t *txd)
1170{
1171 unsigned int write_ptr;
1172 efx_oword_t reg;
1173
1174 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1175 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
1176 reg.qword[0] = *txd;
1177 efx_writeo_page(tx_queue->efx, &reg,
1178 ER_DZ_TX_DESC_UPD, tx_queue->queue);
1179}
1180
1181static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1182{
1183 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1184 EFX_BUF_SIZE));
1185 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
1186 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
1187 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
1188 struct efx_channel *channel = tx_queue->channel;
1189 struct efx_nic *efx = tx_queue->efx;
1190 size_t inlen, outlen;
1191 dma_addr_t dma_addr;
1192 efx_qword_t *txd;
1193 int rc;
1194 int i;
1195
1196 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
1197 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
1198 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
1199 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
1200 MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
1201 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
1202 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
1203 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
1204 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1205
1206 dma_addr = tx_queue->txd.buf.dma_addr;
1207
1208 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
1209 tx_queue->queue, entries, (u64)dma_addr);
1210
1211 for (i = 0; i < entries; ++i) {
1212 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
1213 dma_addr += EFX_BUF_SIZE;
1214 }
1215
1216 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
1217
1218 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
1219 outbuf, sizeof(outbuf), &outlen);
1220 if (rc)
1221 goto fail;
1222
1223 /* A previous user of this TX queue might have set us up the
1224 * bomb by writing a descriptor to the TX push collector but
1225 * not the doorbell. (Each collector belongs to a port, not a
1226 * queue or function, so cannot easily be reset.) We must
1227 * attempt to push a no-op descriptor in its place.
1228 */
1229 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
1230 tx_queue->insert_count = 1;
1231 txd = efx_tx_desc(tx_queue, 0);
1232 EFX_POPULATE_QWORD_4(*txd,
1233 ESF_DZ_TX_DESC_IS_OPT, true,
1234 ESF_DZ_TX_OPTION_TYPE,
1235 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
1236 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
1237 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
1238 tx_queue->write_count = 1;
1239 wmb();
1240 efx_ef10_push_tx_desc(tx_queue, txd);
1241
1242 return;
1243
1244fail:
1245 WARN_ON(true);
1246 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1247}
1248
1249static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
1250{
1251 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
1252 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
1253 struct efx_nic *efx = tx_queue->efx;
1254 size_t outlen;
1255 int rc;
1256
1257 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
1258 tx_queue->queue);
1259
1260 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
1261 outbuf, sizeof(outbuf), &outlen);
1262
1263 if (rc && rc != -EALREADY)
1264 goto fail;
1265
1266 return;
1267
1268fail:
1269 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1270}
1271
1272static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
1273{
1274 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
1275}
1276
1277/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
1278static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
1279{
1280 unsigned int write_ptr;
1281 efx_dword_t reg;
1282
1283 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1284 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
1285 efx_writed_page(tx_queue->efx, &reg,
1286 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
1287}
1288
1289static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
1290{
1291 unsigned int old_write_count = tx_queue->write_count;
1292 struct efx_tx_buffer *buffer;
1293 unsigned int write_ptr;
1294 efx_qword_t *txd;
1295
1296 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
1297
1298 do {
1299 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1300 buffer = &tx_queue->buffer[write_ptr];
1301 txd = efx_tx_desc(tx_queue, write_ptr);
1302 ++tx_queue->write_count;
1303
1304 /* Create TX descriptor ring entry */
1305 if (buffer->flags & EFX_TX_BUF_OPTION) {
1306 *txd = buffer->option;
1307 } else {
1308 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
1309 EFX_POPULATE_QWORD_3(
1310 *txd,
1311 ESF_DZ_TX_KER_CONT,
1312 buffer->flags & EFX_TX_BUF_CONT,
1313 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
1314 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
1315 }
1316 } while (tx_queue->write_count != tx_queue->insert_count);
1317
1318 wmb(); /* Ensure descriptors are written before they are fetched */
1319
1320 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
1321 txd = efx_tx_desc(tx_queue,
1322 old_write_count & tx_queue->ptr_mask);
1323 efx_ef10_push_tx_desc(tx_queue, txd);
1324 ++tx_queue->pushes;
1325 } else {
1326 efx_ef10_notify_tx_desc(tx_queue);
1327 }
1328}
1329
1330static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
1331{
1332 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
1333 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
1334 size_t outlen;
1335 int rc;
1336
1337 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
1338 EVB_PORT_ID_ASSIGNED);
1339 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
1340 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
1341 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
1342 EFX_MAX_CHANNELS);
1343
1344 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
1345 outbuf, sizeof(outbuf), &outlen);
1346 if (rc != 0)
1347 return rc;
1348
1349 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
1350 return -EIO;
1351
1352 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
1353
1354 return 0;
1355}
1356
1357static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
1358{
1359 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
1360 int rc;
1361
1362 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
1363 context);
1364
1365 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
1366 NULL, 0, NULL);
1367 WARN_ON(rc != 0);
1368}
1369
1370static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
1371{
1372 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1373 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1374 int i, rc;
1375
1376 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1377 context);
1378 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1379 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1380
1381 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1382 MCDI_PTR(tablebuf,
1383 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
1384 (u8) efx->rx_indir_table[i];
1385
1386 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1387 sizeof(tablebuf), NULL, 0, NULL);
1388 if (rc != 0)
1389 return rc;
1390
1391 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
1392 context);
1393 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
1394 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
1395 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
1396 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
1397 efx->rx_hash_key[i];
1398
1399 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
1400 sizeof(keybuf), NULL, 0, NULL);
1401}
1402
1403static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
1404{
1405 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1406
1407 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
1408 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
1409 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1410}
1411
1412static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
1413{
1414 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1415 int rc;
1416
1417 netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
1418
1419 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
1420 rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
1421 if (rc != 0)
1422 goto fail;
1423 }
1424
1425 rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
1426 if (rc != 0)
1427 goto fail;
1428
1429 return;
1430
1431fail:
1432 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1433}
1434
1435static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
1436{
1437 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
1438 (rx_queue->ptr_mask + 1) *
1439 sizeof(efx_qword_t),
1440 GFP_KERNEL);
1441}
1442
1443static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
1444{
1445 MCDI_DECLARE_BUF(inbuf,
1446 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1447 EFX_BUF_SIZE));
1448 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
1449 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1450 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
1451 struct efx_nic *efx = rx_queue->efx;
1452 size_t inlen, outlen;
1453 dma_addr_t dma_addr;
1454 int rc;
1455 int i;
1456
1457 rx_queue->scatter_n = 0;
1458 rx_queue->scatter_len = 0;
1459
1460 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
1461 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
1462 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
1463 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
1464 efx_rx_queue_index(rx_queue));
1465 MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
1466 INIT_RXQ_IN_FLAG_PREFIX, 1);
1467 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
1468 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1469
1470 dma_addr = rx_queue->rxd.buf.dma_addr;
1471
1472 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
1473 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
1474
1475 for (i = 0; i < entries; ++i) {
1476 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
1477 dma_addr += EFX_BUF_SIZE;
1478 }
1479
1480 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
1481
1482 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
1483 outbuf, sizeof(outbuf), &outlen);
1484 if (rc)
1485 goto fail;
1486
1487 return;
1488
1489fail:
1490 WARN_ON(true);
1491 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1492}
1493
1494static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
1495{
1496 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
1497 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
1498 struct efx_nic *efx = rx_queue->efx;
1499 size_t outlen;
1500 int rc;
1501
1502 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
1503 efx_rx_queue_index(rx_queue));
1504
1505 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
1506 outbuf, sizeof(outbuf), &outlen);
1507
1508 if (rc && rc != -EALREADY)
1509 goto fail;
1510
1511 return;
1512
1513fail:
1514 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1515}
1516
1517static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
1518{
1519 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
1520}
1521
1522/* This creates an entry in the RX descriptor queue */
1523static inline void
1524efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
1525{
1526 struct efx_rx_buffer *rx_buf;
1527 efx_qword_t *rxd;
1528
1529 rxd = efx_rx_desc(rx_queue, index);
1530 rx_buf = efx_rx_buffer(rx_queue, index);
1531 EFX_POPULATE_QWORD_2(*rxd,
1532 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
1533 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
1534}
1535
1536static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
1537{
1538 struct efx_nic *efx = rx_queue->efx;
1539 unsigned int write_count;
1540 efx_dword_t reg;
1541
1542 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
1543 write_count = rx_queue->added_count & ~7;
1544 if (rx_queue->notified_count == write_count)
1545 return;
1546
1547 do
1548 efx_ef10_build_rx_desc(
1549 rx_queue,
1550 rx_queue->notified_count & rx_queue->ptr_mask);
1551 while (++rx_queue->notified_count != write_count);
1552
1553 wmb();
1554 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
1555 write_count & rx_queue->ptr_mask);
1556 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
1557 efx_rx_queue_index(rx_queue));
1558}
1559
1560static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
1561
1562static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
1563{
1564 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1565 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1566 efx_qword_t event;
1567
1568 EFX_POPULATE_QWORD_2(event,
1569 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1570 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
1571
1572 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1573
1574 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1575 * already swapped the data to little-endian order.
1576 */
1577 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1578 sizeof(efx_qword_t));
1579
1580 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
1581 inbuf, sizeof(inbuf), 0,
1582 efx_ef10_rx_defer_refill_complete, 0);
1583}
1584
1585static void
1586efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
1587 int rc, efx_dword_t *outbuf,
1588 size_t outlen_actual)
1589{
1590 /* nothing to do */
1591}
1592
1593static int efx_ef10_ev_probe(struct efx_channel *channel)
1594{
1595 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
1596 (channel->eventq_mask + 1) *
1597 sizeof(efx_qword_t),
1598 GFP_KERNEL);
1599}
1600
1601static int efx_ef10_ev_init(struct efx_channel *channel)
1602{
1603 MCDI_DECLARE_BUF(inbuf,
1604 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
1605 EFX_BUF_SIZE));
1606 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
1607 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
1608 struct efx_nic *efx = channel->efx;
1609 struct efx_ef10_nic_data *nic_data;
1610 bool supports_rx_merge;
1611 size_t inlen, outlen;
1612 dma_addr_t dma_addr;
1613 int rc;
1614 int i;
1615
1616 nic_data = efx->nic_data;
1617 supports_rx_merge =
1618 !!(nic_data->datapath_caps &
1619 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
1620
1621 /* Fill event queue with all ones (i.e. empty events) */
1622 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1623
1624 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
1625 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
1626 /* INIT_EVQ expects index in vector table, not absolute */
1627 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
1628 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
1629 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
1630 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
1631 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
1632 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
1633 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
1634 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
1635 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
1636 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
1637 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
1638 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
1639 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
1640
1641 dma_addr = channel->eventq.buf.dma_addr;
1642 for (i = 0; i < entries; ++i) {
1643 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
1644 dma_addr += EFX_BUF_SIZE;
1645 }
1646
1647 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
1648
1649 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
1650 outbuf, sizeof(outbuf), &outlen);
1651 if (rc)
1652 goto fail;
1653
1654 /* IRQ return is ignored */
1655
1656 return 0;
1657
1658fail:
1659 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1660 return rc;
1661}
1662
1663static void efx_ef10_ev_fini(struct efx_channel *channel)
1664{
1665 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
1666 MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
1667 struct efx_nic *efx = channel->efx;
1668 size_t outlen;
1669 int rc;
1670
1671 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
1672
1673 rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
1674 outbuf, sizeof(outbuf), &outlen);
1675
1676 if (rc && rc != -EALREADY)
1677 goto fail;
1678
1679 return;
1680
1681fail:
1682 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1683}
1684
1685static void efx_ef10_ev_remove(struct efx_channel *channel)
1686{
1687 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
1688}
1689
1690static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
1691 unsigned int rx_queue_label)
1692{
1693 struct efx_nic *efx = rx_queue->efx;
1694
1695 netif_info(efx, hw, efx->net_dev,
1696 "rx event arrived on queue %d labeled as queue %u\n",
1697 efx_rx_queue_index(rx_queue), rx_queue_label);
1698
1699 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1700}
1701
1702static void
1703efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
1704 unsigned int actual, unsigned int expected)
1705{
1706 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
1707 struct efx_nic *efx = rx_queue->efx;
1708
1709 netif_info(efx, hw, efx->net_dev,
1710 "dropped %d events (index=%d expected=%d)\n",
1711 dropped, actual, expected);
1712
1713 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1714}
1715
1716/* partially received RX was aborted. clean up. */
1717static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
1718{
1719 unsigned int rx_desc_ptr;
1720
1721 WARN_ON(rx_queue->scatter_n == 0);
1722
1723 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
1724 "scattered RX aborted (dropping %u buffers)\n",
1725 rx_queue->scatter_n);
1726
1727 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
1728
1729 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
1730 0, EFX_RX_PKT_DISCARD);
1731
1732 rx_queue->removed_count += rx_queue->scatter_n;
1733 rx_queue->scatter_n = 0;
1734 rx_queue->scatter_len = 0;
1735 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
1736}
1737
1738static int efx_ef10_handle_rx_event(struct efx_channel *channel,
1739 const efx_qword_t *event)
1740{
1741 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
1742 unsigned int n_descs, n_packets, i;
1743 struct efx_nic *efx = channel->efx;
1744 struct efx_rx_queue *rx_queue;
1745 bool rx_cont;
1746 u16 flags = 0;
1747
1748 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1749 return 0;
1750
1751 /* Basic packet information */
1752 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
1753 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
1754 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
1755 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
1756 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
1757
1758 WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
1759
1760 rx_queue = efx_channel_get_rx_queue(channel);
1761
1762 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
1763 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
1764
1765 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
1766 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1767
1768 if (n_descs != rx_queue->scatter_n + 1) {
1769 /* detect rx abort */
1770 if (unlikely(n_descs == rx_queue->scatter_n)) {
1771 WARN_ON(rx_bytes != 0);
1772 efx_ef10_handle_rx_abort(rx_queue);
1773 return 0;
1774 }
1775
1776 if (unlikely(rx_queue->scatter_n != 0)) {
1777 /* Scattered packet completions cannot be
1778 * merged, so something has gone wrong.
1779 */
1780 efx_ef10_handle_rx_bad_lbits(
1781 rx_queue, next_ptr_lbits,
1782 (rx_queue->removed_count +
1783 rx_queue->scatter_n + 1) &
1784 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1785 return 0;
1786 }
1787
1788 /* Merged completion for multiple non-scattered packets */
1789 rx_queue->scatter_n = 1;
1790 rx_queue->scatter_len = 0;
1791 n_packets = n_descs;
1792 ++channel->n_rx_merge_events;
1793 channel->n_rx_merge_packets += n_packets;
1794 flags |= EFX_RX_PKT_PREFIX_LEN;
1795 } else {
1796 ++rx_queue->scatter_n;
1797 rx_queue->scatter_len += rx_bytes;
1798 if (rx_cont)
1799 return 0;
1800 n_packets = 1;
1801 }
1802
1803 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
1804 flags |= EFX_RX_PKT_DISCARD;
1805
1806 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
1807 channel->n_rx_ip_hdr_chksum_err += n_packets;
1808 } else if (unlikely(EFX_QWORD_FIELD(*event,
1809 ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
1810 channel->n_rx_tcp_udp_chksum_err += n_packets;
1811 } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
1812 rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
1813 flags |= EFX_RX_PKT_CSUMMED;
1814 }
1815
1816 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
1817 flags |= EFX_RX_PKT_TCP;
1818
1819 channel->irq_mod_score += 2 * n_packets;
1820
1821 /* Handle received packet(s) */
1822 for (i = 0; i < n_packets; i++) {
1823 efx_rx_packet(rx_queue,
1824 rx_queue->removed_count & rx_queue->ptr_mask,
1825 rx_queue->scatter_n, rx_queue->scatter_len,
1826 flags);
1827 rx_queue->removed_count += rx_queue->scatter_n;
1828 }
1829
1830 rx_queue->scatter_n = 0;
1831 rx_queue->scatter_len = 0;
1832
1833 return n_packets;
1834}
1835
1836static int
1837efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
1838{
1839 struct efx_nic *efx = channel->efx;
1840 struct efx_tx_queue *tx_queue;
1841 unsigned int tx_ev_desc_ptr;
1842 unsigned int tx_ev_q_label;
1843 int tx_descs = 0;
1844
1845 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1846 return 0;
1847
1848 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
1849 return 0;
1850
1851 /* Transmit completion */
1852 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
1853 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
1854 tx_queue = efx_channel_get_tx_queue(channel,
1855 tx_ev_q_label % EFX_TXQ_TYPES);
1856 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
1857 tx_queue->ptr_mask);
1858 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
1859
1860 return tx_descs;
1861}
1862
1863static void
1864efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1865{
1866 struct efx_nic *efx = channel->efx;
1867 int subcode;
1868
1869 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
1870
1871 switch (subcode) {
1872 case ESE_DZ_DRV_TIMER_EV:
1873 case ESE_DZ_DRV_WAKE_UP_EV:
1874 break;
1875 case ESE_DZ_DRV_START_UP_EV:
1876 /* event queue init complete. ok. */
1877 break;
1878 default:
1879 netif_err(efx, hw, efx->net_dev,
1880 "channel %d unknown driver event type %d"
1881 " (data " EFX_QWORD_FMT ")\n",
1882 channel->channel, subcode,
1883 EFX_QWORD_VAL(*event));
1884
1885 }
1886}
1887
1888static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
1889 efx_qword_t *event)
1890{
1891 struct efx_nic *efx = channel->efx;
1892 u32 subcode;
1893
1894 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
1895
1896 switch (subcode) {
1897 case EFX_EF10_TEST:
1898 channel->event_test_cpu = raw_smp_processor_id();
1899 break;
1900 case EFX_EF10_REFILL:
1901 /* The queue must be empty, so we won't receive any rx
1902 * events, so efx_process_channel() won't refill the
1903 * queue. Refill it here
1904 */
1905 efx_fast_push_rx_descriptors(&channel->rx_queue);
1906 break;
1907 default:
1908 netif_err(efx, hw, efx->net_dev,
1909 "channel %d unknown driver event type %u"
1910 " (data " EFX_QWORD_FMT ")\n",
1911 channel->channel, (unsigned) subcode,
1912 EFX_QWORD_VAL(*event));
1913 }
1914}
1915
1916static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
1917{
1918 struct efx_nic *efx = channel->efx;
1919 efx_qword_t event, *p_event;
1920 unsigned int read_ptr;
1921 int ev_code;
1922 int tx_descs = 0;
1923 int spent = 0;
1924
1925 read_ptr = channel->eventq_read_ptr;
1926
1927 for (;;) {
1928 p_event = efx_event(channel, read_ptr);
1929 event = *p_event;
1930
1931 if (!efx_event_present(&event))
1932 break;
1933
1934 EFX_SET_QWORD(*p_event);
1935
1936 ++read_ptr;
1937
1938 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
1939
1940 netif_vdbg(efx, drv, efx->net_dev,
1941 "processing event on %d " EFX_QWORD_FMT "\n",
1942 channel->channel, EFX_QWORD_VAL(event));
1943
1944 switch (ev_code) {
1945 case ESE_DZ_EV_CODE_MCDI_EV:
1946 efx_mcdi_process_event(channel, &event);
1947 break;
1948 case ESE_DZ_EV_CODE_RX_EV:
1949 spent += efx_ef10_handle_rx_event(channel, &event);
1950 if (spent >= quota) {
1951 /* XXX can we split a merged event to
1952 * avoid going over-quota?
1953 */
1954 spent = quota;
1955 goto out;
1956 }
1957 break;
1958 case ESE_DZ_EV_CODE_TX_EV:
1959 tx_descs += efx_ef10_handle_tx_event(channel, &event);
1960 if (tx_descs > efx->txq_entries) {
1961 spent = quota;
1962 goto out;
1963 } else if (++spent == quota) {
1964 goto out;
1965 }
1966 break;
1967 case ESE_DZ_EV_CODE_DRIVER_EV:
1968 efx_ef10_handle_driver_event(channel, &event);
1969 if (++spent == quota)
1970 goto out;
1971 break;
1972 case EFX_EF10_DRVGEN_EV:
1973 efx_ef10_handle_driver_generated_event(channel, &event);
1974 break;
1975 default:
1976 netif_err(efx, hw, efx->net_dev,
1977 "channel %d unknown event type %d"
1978 " (data " EFX_QWORD_FMT ")\n",
1979 channel->channel, ev_code,
1980 EFX_QWORD_VAL(event));
1981 }
1982 }
1983
1984out:
1985 channel->eventq_read_ptr = read_ptr;
1986 return spent;
1987}
1988
1989static void efx_ef10_ev_read_ack(struct efx_channel *channel)
1990{
1991 struct efx_nic *efx = channel->efx;
1992 efx_dword_t rptr;
1993
1994 if (EFX_EF10_WORKAROUND_35388(efx)) {
1995 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
1996 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
1997 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
1998 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
1999
2000 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2001 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
2002 ERF_DD_EVQ_IND_RPTR,
2003 (channel->eventq_read_ptr &
2004 channel->eventq_mask) >>
2005 ERF_DD_EVQ_IND_RPTR_WIDTH);
2006 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2007 channel->channel);
2008 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2009 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
2010 ERF_DD_EVQ_IND_RPTR,
2011 channel->eventq_read_ptr &
2012 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
2013 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2014 channel->channel);
2015 } else {
2016 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
2017 channel->eventq_read_ptr &
2018 channel->eventq_mask);
2019 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
2020 }
2021}
2022
2023static void efx_ef10_ev_test_generate(struct efx_channel *channel)
2024{
2025 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2026 struct efx_nic *efx = channel->efx;
2027 efx_qword_t event;
2028 int rc;
2029
2030 EFX_POPULATE_QWORD_2(event,
2031 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2032 ESF_DZ_EV_DATA, EFX_EF10_TEST);
2033
2034 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2035
2036 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2037 * already swapped the data to little-endian order.
2038 */
2039 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2040 sizeof(efx_qword_t));
2041
2042 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
2043 NULL, 0, NULL);
2044 if (rc != 0)
2045 goto fail;
2046
2047 return;
2048
2049fail:
2050 WARN_ON(true);
2051 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2052}
2053
2054void efx_ef10_handle_drain_event(struct efx_nic *efx)
2055{
2056 if (atomic_dec_and_test(&efx->active_queues))
2057 wake_up(&efx->flush_wq);
2058
2059 WARN_ON(atomic_read(&efx->active_queues) < 0);
2060}
2061
2062static int efx_ef10_fini_dmaq(struct efx_nic *efx)
2063{
2064 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2065 struct efx_channel *channel;
2066 struct efx_tx_queue *tx_queue;
2067 struct efx_rx_queue *rx_queue;
2068 int pending;
2069
2070 /* If the MC has just rebooted, the TX/RX queues will have already been
2071 * torn down, but efx->active_queues needs to be set to zero.
2072 */
2073 if (nic_data->must_realloc_vis) {
2074 atomic_set(&efx->active_queues, 0);
2075 return 0;
2076 }
2077
2078 /* Do not attempt to write to the NIC during EEH recovery */
2079 if (efx->state != STATE_RECOVERY) {
2080 efx_for_each_channel(channel, efx) {
2081 efx_for_each_channel_rx_queue(rx_queue, channel)
2082 efx_ef10_rx_fini(rx_queue);
2083 efx_for_each_channel_tx_queue(tx_queue, channel)
2084 efx_ef10_tx_fini(tx_queue);
2085 }
2086
2087 wait_event_timeout(efx->flush_wq,
2088 atomic_read(&efx->active_queues) == 0,
2089 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
2090 pending = atomic_read(&efx->active_queues);
2091 if (pending) {
2092 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
2093 pending);
2094 return -ETIMEDOUT;
2095 }
2096 }
2097
2098 return 0;
2099}
2100
2101static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
2102 const struct efx_filter_spec *right)
2103{
2104 if ((left->match_flags ^ right->match_flags) |
2105 ((left->flags ^ right->flags) &
2106 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
2107 return false;
2108
2109 return memcmp(&left->outer_vid, &right->outer_vid,
2110 sizeof(struct efx_filter_spec) -
2111 offsetof(struct efx_filter_spec, outer_vid)) == 0;
2112}
2113
2114static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
2115{
2116 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
2117 return jhash2((const u32 *)&spec->outer_vid,
2118 (sizeof(struct efx_filter_spec) -
2119 offsetof(struct efx_filter_spec, outer_vid)) / 4,
2120 0);
2121 /* XXX should we randomise the initval? */
2122}
2123
2124/* Decide whether a filter should be exclusive or else should allow
2125 * delivery to additional recipients. Currently we decide that
2126 * filters for specific local unicast MAC and IP addresses are
2127 * exclusive.
2128 */
2129static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
2130{
2131 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
2132 !is_multicast_ether_addr(spec->loc_mac))
2133 return true;
2134
2135 if ((spec->match_flags &
2136 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
2137 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
2138 if (spec->ether_type == htons(ETH_P_IP) &&
2139 !ipv4_is_multicast(spec->loc_host[0]))
2140 return true;
2141 if (spec->ether_type == htons(ETH_P_IPV6) &&
2142 ((const u8 *)spec->loc_host)[0] != 0xff)
2143 return true;
2144 }
2145
2146 return false;
2147}
2148
2149static struct efx_filter_spec *
2150efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
2151 unsigned int filter_idx)
2152{
2153 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
2154 ~EFX_EF10_FILTER_FLAGS);
2155}
2156
2157static unsigned int
2158efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
2159 unsigned int filter_idx)
2160{
2161 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
2162}
2163
2164static void
2165efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
2166 unsigned int filter_idx,
2167 const struct efx_filter_spec *spec,
2168 unsigned int flags)
2169{
2170 table->entry[filter_idx].spec = (unsigned long)spec | flags;
2171}
2172
2173static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2174 const struct efx_filter_spec *spec,
2175 efx_dword_t *inbuf, u64 handle,
2176 bool replacing)
2177{
2178 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2179
2180 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
2181
2182 if (replacing) {
2183 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2184 MC_CMD_FILTER_OP_IN_OP_REPLACE);
2185 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
2186 } else {
2187 u32 match_fields = 0;
2188
2189 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2190 efx_ef10_filter_is_exclusive(spec) ?
2191 MC_CMD_FILTER_OP_IN_OP_INSERT :
2192 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
2193
2194 /* Convert match flags and values. Unlike almost
2195 * everything else in MCDI, these fields are in
2196 * network byte order.
2197 */
2198 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
2199 match_fields |=
2200 is_multicast_ether_addr(spec->loc_mac) ?
2201 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
2202 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
2203#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
2204 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
2205 match_fields |= \
2206 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2207 mcdi_field ## _LBN; \
2208 BUILD_BUG_ON( \
2209 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
2210 sizeof(spec->gen_field)); \
2211 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
2212 &spec->gen_field, sizeof(spec->gen_field)); \
2213 }
2214 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
2215 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
2216 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
2217 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
2218 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
2219 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
2220 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
2221 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
2222 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
2223 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
2224#undef COPY_FIELD
2225 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
2226 match_fields);
2227 }
2228
2229 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
2230 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
2231 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2232 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
2233 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
2234 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
2235 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
2236 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
2237 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
2238 (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
2239 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
2240 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
2241 if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
2242 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
2243 spec->rss_context !=
2244 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
2245 spec->rss_context : nic_data->rx_rss_context);
2246}
2247
2248static int efx_ef10_filter_push(struct efx_nic *efx,
2249 const struct efx_filter_spec *spec,
2250 u64 *handle, bool replacing)
2251{
2252 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2253 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
2254 int rc;
2255
2256 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
2257 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2258 outbuf, sizeof(outbuf), NULL);
2259 if (rc == 0)
2260 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2261 return rc;
2262}
2263
2264static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
2265 enum efx_filter_match_flags match_flags)
2266{
2267 unsigned int match_pri;
2268
2269 for (match_pri = 0;
2270 match_pri < table->rx_match_count;
2271 match_pri++)
2272 if (table->rx_match_flags[match_pri] == match_flags)
2273 return match_pri;
2274
2275 return -EPROTONOSUPPORT;
2276}
2277
2278static s32 efx_ef10_filter_insert(struct efx_nic *efx,
2279 struct efx_filter_spec *spec,
2280 bool replace_equal)
2281{
2282 struct efx_ef10_filter_table *table = efx->filter_state;
2283 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
2284 struct efx_filter_spec *saved_spec;
2285 unsigned int match_pri, hash;
2286 unsigned int priv_flags;
2287 bool replacing = false;
2288 int ins_index = -1;
2289 DEFINE_WAIT(wait);
2290 bool is_mc_recip;
2291 s32 rc;
2292
2293 /* For now, only support RX filters */
2294 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
2295 EFX_FILTER_FLAG_RX)
2296 return -EINVAL;
2297
2298 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
2299 if (rc < 0)
2300 return rc;
2301 match_pri = rc;
2302
2303 hash = efx_ef10_filter_hash(spec);
2304 is_mc_recip = efx_filter_is_mc_recipient(spec);
2305 if (is_mc_recip)
2306 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
2307
2308 /* Find any existing filters with the same match tuple or
2309 * else a free slot to insert at. If any of them are busy,
2310 * we have to wait and retry.
2311 */
2312 for (;;) {
2313 unsigned int depth = 1;
2314 unsigned int i;
2315
2316 spin_lock_bh(&efx->filter_lock);
2317
2318 for (;;) {
2319 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2320 saved_spec = efx_ef10_filter_entry_spec(table, i);
2321
2322 if (!saved_spec) {
2323 if (ins_index < 0)
2324 ins_index = i;
2325 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
2326 if (table->entry[i].spec &
2327 EFX_EF10_FILTER_FLAG_BUSY)
2328 break;
2329 if (spec->priority < saved_spec->priority &&
2330 !(saved_spec->priority ==
2331 EFX_FILTER_PRI_REQUIRED &&
2332 saved_spec->flags &
2333 EFX_FILTER_FLAG_RX_STACK)) {
2334 rc = -EPERM;
2335 goto out_unlock;
2336 }
2337 if (!is_mc_recip) {
2338 /* This is the only one */
2339 if (spec->priority ==
2340 saved_spec->priority &&
2341 !replace_equal) {
2342 rc = -EEXIST;
2343 goto out_unlock;
2344 }
2345 ins_index = i;
2346 goto found;
2347 } else if (spec->priority >
2348 saved_spec->priority ||
2349 (spec->priority ==
2350 saved_spec->priority &&
2351 replace_equal)) {
2352 if (ins_index < 0)
2353 ins_index = i;
2354 else
2355 __set_bit(depth, mc_rem_map);
2356 }
2357 }
2358
2359 /* Once we reach the maximum search depth, use
2360 * the first suitable slot or return -EBUSY if
2361 * there was none
2362 */
2363 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2364 if (ins_index < 0) {
2365 rc = -EBUSY;
2366 goto out_unlock;
2367 }
2368 goto found;
2369 }
2370
2371 ++depth;
2372 }
2373
2374 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2375 spin_unlock_bh(&efx->filter_lock);
2376 schedule();
2377 }
2378
2379found:
2380 /* Create a software table entry if necessary, and mark it
2381 * busy. We might yet fail to insert, but any attempt to
2382 * insert a conflicting filter while we're waiting for the
2383 * firmware must find the busy entry.
2384 */
2385 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2386 if (saved_spec) {
2387 if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2388 /* Just make sure it won't be removed */
2389 saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2390 table->entry[ins_index].spec &=
2391 ~EFX_EF10_FILTER_FLAG_STACK_OLD;
2392 rc = ins_index;
2393 goto out_unlock;
2394 }
2395 replacing = true;
2396 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
2397 } else {
2398 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2399 if (!saved_spec) {
2400 rc = -ENOMEM;
2401 goto out_unlock;
2402 }
2403 *saved_spec = *spec;
2404 priv_flags = 0;
2405 }
2406 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2407 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
2408
2409 /* Mark lower-priority multicast recipients busy prior to removal */
2410 if (is_mc_recip) {
2411 unsigned int depth, i;
2412
2413 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2414 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2415 if (test_bit(depth, mc_rem_map))
2416 table->entry[i].spec |=
2417 EFX_EF10_FILTER_FLAG_BUSY;
2418 }
2419 }
2420
2421 spin_unlock_bh(&efx->filter_lock);
2422
2423 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
2424 replacing);
2425
2426 /* Finalise the software table entry */
2427 spin_lock_bh(&efx->filter_lock);
2428 if (rc == 0) {
2429 if (replacing) {
2430 /* Update the fields that may differ */
2431 saved_spec->priority = spec->priority;
2432 saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
2433 saved_spec->flags |= spec->flags;
2434 saved_spec->rss_context = spec->rss_context;
2435 saved_spec->dmaq_id = spec->dmaq_id;
2436 }
2437 } else if (!replacing) {
2438 kfree(saved_spec);
2439 saved_spec = NULL;
2440 }
2441 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
2442
2443 /* Remove and finalise entries for lower-priority multicast
2444 * recipients
2445 */
2446 if (is_mc_recip) {
2447 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2448 unsigned int depth, i;
2449
2450 memset(inbuf, 0, sizeof(inbuf));
2451
2452 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2453 if (!test_bit(depth, mc_rem_map))
2454 continue;
2455
2456 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2457 saved_spec = efx_ef10_filter_entry_spec(table, i);
2458 priv_flags = efx_ef10_filter_entry_flags(table, i);
2459
2460 if (rc == 0) {
2461 spin_unlock_bh(&efx->filter_lock);
2462 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2463 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2464 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2465 table->entry[i].handle);
2466 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2467 inbuf, sizeof(inbuf),
2468 NULL, 0, NULL);
2469 spin_lock_bh(&efx->filter_lock);
2470 }
2471
2472 if (rc == 0) {
2473 kfree(saved_spec);
2474 saved_spec = NULL;
2475 priv_flags = 0;
2476 } else {
2477 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
2478 }
2479 efx_ef10_filter_set_entry(table, i, saved_spec,
2480 priv_flags);
2481 }
2482 }
2483
2484 /* If successful, return the inserted filter ID */
2485 if (rc == 0)
2486 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
2487
2488 wake_up_all(&table->waitq);
2489out_unlock:
2490 spin_unlock_bh(&efx->filter_lock);
2491 finish_wait(&table->waitq, &wait);
2492 return rc;
2493}
2494
Fengguang Wu9fd8095d2013-08-31 06:54:05 +08002495static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
Ben Hutchings8127d662013-08-29 19:19:29 +01002496{
2497 /* no need to do anything here on EF10 */
2498}
2499
2500/* Remove a filter.
2501 * If !stack_requested, remove by ID
2502 * If stack_requested, remove by index
2503 * Filter ID may come from userland and must be range-checked.
2504 */
2505static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
2506 enum efx_filter_priority priority,
2507 u32 filter_id, bool stack_requested)
2508{
2509 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2510 struct efx_ef10_filter_table *table = efx->filter_state;
2511 MCDI_DECLARE_BUF(inbuf,
2512 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2513 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2514 struct efx_filter_spec *spec;
2515 DEFINE_WAIT(wait);
2516 int rc;
2517
2518 /* Find the software table entry and mark it busy. Don't
2519 * remove it yet; any attempt to update while we're waiting
2520 * for the firmware must find the busy entry.
2521 */
2522 for (;;) {
2523 spin_lock_bh(&efx->filter_lock);
2524 if (!(table->entry[filter_idx].spec &
2525 EFX_EF10_FILTER_FLAG_BUSY))
2526 break;
2527 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2528 spin_unlock_bh(&efx->filter_lock);
2529 schedule();
2530 }
2531 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2532 if (!spec || spec->priority > priority ||
2533 (!stack_requested &&
2534 efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
2535 filter_id / HUNT_FILTER_TBL_ROWS)) {
2536 rc = -ENOENT;
2537 goto out_unlock;
2538 }
2539 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2540 spin_unlock_bh(&efx->filter_lock);
2541
2542 if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
2543 /* Reset steering of a stack-owned filter */
2544
2545 struct efx_filter_spec new_spec = *spec;
2546
2547 new_spec.priority = EFX_FILTER_PRI_REQUIRED;
2548 new_spec.flags = (EFX_FILTER_FLAG_RX |
2549 EFX_FILTER_FLAG_RX_RSS |
2550 EFX_FILTER_FLAG_RX_STACK);
2551 new_spec.dmaq_id = 0;
2552 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
2553 rc = efx_ef10_filter_push(efx, &new_spec,
2554 &table->entry[filter_idx].handle,
2555 true);
2556
2557 spin_lock_bh(&efx->filter_lock);
2558 if (rc == 0)
2559 *spec = new_spec;
2560 } else {
2561 /* Really remove the filter */
2562
2563 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2564 efx_ef10_filter_is_exclusive(spec) ?
2565 MC_CMD_FILTER_OP_IN_OP_REMOVE :
2566 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2567 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2568 table->entry[filter_idx].handle);
2569 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2570 inbuf, sizeof(inbuf), NULL, 0, NULL);
2571
2572 spin_lock_bh(&efx->filter_lock);
2573 if (rc == 0) {
2574 kfree(spec);
2575 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2576 }
2577 }
2578 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2579 wake_up_all(&table->waitq);
2580out_unlock:
2581 spin_unlock_bh(&efx->filter_lock);
2582 finish_wait(&table->waitq, &wait);
2583 return rc;
2584}
2585
2586static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
2587 enum efx_filter_priority priority,
2588 u32 filter_id)
2589{
2590 return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
2591}
2592
2593static int efx_ef10_filter_get_safe(struct efx_nic *efx,
2594 enum efx_filter_priority priority,
2595 u32 filter_id, struct efx_filter_spec *spec)
2596{
2597 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2598 struct efx_ef10_filter_table *table = efx->filter_state;
2599 const struct efx_filter_spec *saved_spec;
2600 int rc;
2601
2602 spin_lock_bh(&efx->filter_lock);
2603 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
2604 if (saved_spec && saved_spec->priority == priority &&
2605 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
2606 filter_id / HUNT_FILTER_TBL_ROWS) {
2607 *spec = *saved_spec;
2608 rc = 0;
2609 } else {
2610 rc = -ENOENT;
2611 }
2612 spin_unlock_bh(&efx->filter_lock);
2613 return rc;
2614}
2615
2616static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
2617 enum efx_filter_priority priority)
2618{
2619 /* TODO */
2620}
2621
2622static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
2623 enum efx_filter_priority priority)
2624{
2625 struct efx_ef10_filter_table *table = efx->filter_state;
2626 unsigned int filter_idx;
2627 s32 count = 0;
2628
2629 spin_lock_bh(&efx->filter_lock);
2630 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2631 if (table->entry[filter_idx].spec &&
2632 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
2633 priority)
2634 ++count;
2635 }
2636 spin_unlock_bh(&efx->filter_lock);
2637 return count;
2638}
2639
2640static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
2641{
2642 struct efx_ef10_filter_table *table = efx->filter_state;
2643
2644 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
2645}
2646
2647static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
2648 enum efx_filter_priority priority,
2649 u32 *buf, u32 size)
2650{
2651 struct efx_ef10_filter_table *table = efx->filter_state;
2652 struct efx_filter_spec *spec;
2653 unsigned int filter_idx;
2654 s32 count = 0;
2655
2656 spin_lock_bh(&efx->filter_lock);
2657 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2658 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2659 if (spec && spec->priority == priority) {
2660 if (count == size) {
2661 count = -EMSGSIZE;
2662 break;
2663 }
2664 buf[count++] = (efx_ef10_filter_rx_match_pri(
2665 table, spec->match_flags) *
2666 HUNT_FILTER_TBL_ROWS +
2667 filter_idx);
2668 }
2669 }
2670 spin_unlock_bh(&efx->filter_lock);
2671 return count;
2672}
2673
2674#ifdef CONFIG_RFS_ACCEL
2675
2676static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
2677
2678static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
2679 struct efx_filter_spec *spec)
2680{
2681 struct efx_ef10_filter_table *table = efx->filter_state;
2682 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2683 struct efx_filter_spec *saved_spec;
2684 unsigned int hash, i, depth = 1;
2685 bool replacing = false;
2686 int ins_index = -1;
2687 u64 cookie;
2688 s32 rc;
2689
2690 /* Must be an RX filter without RSS and not for a multicast
2691 * destination address (RFS only works for connected sockets).
2692 * These restrictions allow us to pass only a tiny amount of
2693 * data through to the completion function.
2694 */
2695 EFX_WARN_ON_PARANOID(spec->flags !=
2696 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
2697 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
2698 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
2699
2700 hash = efx_ef10_filter_hash(spec);
2701
2702 spin_lock_bh(&efx->filter_lock);
2703
2704 /* Find any existing filter with the same match tuple or else
2705 * a free slot to insert at. If an existing filter is busy,
2706 * we have to give up.
2707 */
2708 for (;;) {
2709 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2710 saved_spec = efx_ef10_filter_entry_spec(table, i);
2711
2712 if (!saved_spec) {
2713 if (ins_index < 0)
2714 ins_index = i;
2715 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
2716 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
2717 rc = -EBUSY;
2718 goto fail_unlock;
2719 }
2720 EFX_WARN_ON_PARANOID(saved_spec->flags &
2721 EFX_FILTER_FLAG_RX_STACK);
2722 if (spec->priority < saved_spec->priority) {
2723 rc = -EPERM;
2724 goto fail_unlock;
2725 }
2726 ins_index = i;
2727 break;
2728 }
2729
2730 /* Once we reach the maximum search depth, use the
2731 * first suitable slot or return -EBUSY if there was
2732 * none
2733 */
2734 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2735 if (ins_index < 0) {
2736 rc = -EBUSY;
2737 goto fail_unlock;
2738 }
2739 break;
2740 }
2741
2742 ++depth;
2743 }
2744
2745 /* Create a software table entry if necessary, and mark it
2746 * busy. We might yet fail to insert, but any attempt to
2747 * insert a conflicting filter while we're waiting for the
2748 * firmware must find the busy entry.
2749 */
2750 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2751 if (saved_spec) {
2752 replacing = true;
2753 } else {
2754 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2755 if (!saved_spec) {
2756 rc = -ENOMEM;
2757 goto fail_unlock;
2758 }
2759 *saved_spec = *spec;
2760 }
2761 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2762 EFX_EF10_FILTER_FLAG_BUSY);
2763
2764 spin_unlock_bh(&efx->filter_lock);
2765
2766 /* Pack up the variables needed on completion */
2767 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
2768
2769 efx_ef10_filter_push_prep(efx, spec, inbuf,
2770 table->entry[ins_index].handle, replacing);
2771 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2772 MC_CMD_FILTER_OP_OUT_LEN,
2773 efx_ef10_filter_rfs_insert_complete, cookie);
2774
2775 return ins_index;
2776
2777fail_unlock:
2778 spin_unlock_bh(&efx->filter_lock);
2779 return rc;
2780}
2781
2782static void
2783efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
2784 int rc, efx_dword_t *outbuf,
2785 size_t outlen_actual)
2786{
2787 struct efx_ef10_filter_table *table = efx->filter_state;
2788 unsigned int ins_index, dmaq_id;
2789 struct efx_filter_spec *spec;
2790 bool replacing;
2791
2792 /* Unpack the cookie */
2793 replacing = cookie >> 31;
2794 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
2795 dmaq_id = cookie & 0xffff;
2796
2797 spin_lock_bh(&efx->filter_lock);
2798 spec = efx_ef10_filter_entry_spec(table, ins_index);
2799 if (rc == 0) {
2800 table->entry[ins_index].handle =
2801 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2802 if (replacing)
2803 spec->dmaq_id = dmaq_id;
2804 } else if (!replacing) {
2805 kfree(spec);
2806 spec = NULL;
2807 }
2808 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
2809 spin_unlock_bh(&efx->filter_lock);
2810
2811 wake_up_all(&table->waitq);
2812}
2813
2814static void
2815efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2816 unsigned long filter_idx,
2817 int rc, efx_dword_t *outbuf,
2818 size_t outlen_actual);
2819
2820static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2821 unsigned int filter_idx)
2822{
2823 struct efx_ef10_filter_table *table = efx->filter_state;
2824 struct efx_filter_spec *spec =
2825 efx_ef10_filter_entry_spec(table, filter_idx);
2826 MCDI_DECLARE_BUF(inbuf,
2827 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2828 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2829
2830 if (!spec ||
2831 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
2832 spec->priority != EFX_FILTER_PRI_HINT ||
2833 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
2834 flow_id, filter_idx))
2835 return false;
2836
2837 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2838 MC_CMD_FILTER_OP_IN_OP_REMOVE);
2839 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2840 table->entry[filter_idx].handle);
2841 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
2842 efx_ef10_filter_rfs_expire_complete, filter_idx))
2843 return false;
2844
2845 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2846 return true;
2847}
2848
2849static void
2850efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2851 unsigned long filter_idx,
2852 int rc, efx_dword_t *outbuf,
2853 size_t outlen_actual)
2854{
2855 struct efx_ef10_filter_table *table = efx->filter_state;
2856 struct efx_filter_spec *spec =
2857 efx_ef10_filter_entry_spec(table, filter_idx);
2858
2859 spin_lock_bh(&efx->filter_lock);
2860 if (rc == 0) {
2861 kfree(spec);
2862 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2863 }
2864 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2865 wake_up_all(&table->waitq);
2866 spin_unlock_bh(&efx->filter_lock);
2867}
2868
2869#endif /* CONFIG_RFS_ACCEL */
2870
2871static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
2872{
2873 int match_flags = 0;
2874
2875#define MAP_FLAG(gen_flag, mcdi_field) { \
2876 u32 old_mcdi_flags = mcdi_flags; \
2877 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2878 mcdi_field ## _LBN); \
2879 if (mcdi_flags != old_mcdi_flags) \
2880 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
2881 }
2882 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
2883 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
2884 MAP_FLAG(REM_HOST, SRC_IP);
2885 MAP_FLAG(LOC_HOST, DST_IP);
2886 MAP_FLAG(REM_MAC, SRC_MAC);
2887 MAP_FLAG(REM_PORT, SRC_PORT);
2888 MAP_FLAG(LOC_MAC, DST_MAC);
2889 MAP_FLAG(LOC_PORT, DST_PORT);
2890 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
2891 MAP_FLAG(INNER_VID, INNER_VLAN);
2892 MAP_FLAG(OUTER_VID, OUTER_VLAN);
2893 MAP_FLAG(IP_PROTO, IP_PROTO);
2894#undef MAP_FLAG
2895
2896 /* Did we map them all? */
2897 if (mcdi_flags)
2898 return -EINVAL;
2899
2900 return match_flags;
2901}
2902
2903static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2904{
2905 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
2906 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
2907 unsigned int pd_match_pri, pd_match_count;
2908 struct efx_ef10_filter_table *table;
2909 size_t outlen;
2910 int rc;
2911
2912 table = kzalloc(sizeof(*table), GFP_KERNEL);
2913 if (!table)
2914 return -ENOMEM;
2915
2916 /* Find out which RX filter types are supported, and their priorities */
2917 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
2918 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
2919 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
2920 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
2921 &outlen);
2922 if (rc)
2923 goto fail;
2924 pd_match_count = MCDI_VAR_ARRAY_LEN(
2925 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
2926 table->rx_match_count = 0;
2927
2928 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
2929 u32 mcdi_flags =
2930 MCDI_ARRAY_DWORD(
2931 outbuf,
2932 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
2933 pd_match_pri);
2934 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
2935 if (rc < 0) {
2936 netif_dbg(efx, probe, efx->net_dev,
2937 "%s: fw flags %#x pri %u not supported in driver\n",
2938 __func__, mcdi_flags, pd_match_pri);
2939 } else {
2940 netif_dbg(efx, probe, efx->net_dev,
2941 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
2942 __func__, mcdi_flags, pd_match_pri,
2943 rc, table->rx_match_count);
2944 table->rx_match_flags[table->rx_match_count++] = rc;
2945 }
2946 }
2947
2948 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
2949 if (!table->entry) {
2950 rc = -ENOMEM;
2951 goto fail;
2952 }
2953
2954 efx->filter_state = table;
2955 init_waitqueue_head(&table->waitq);
2956 return 0;
2957
2958fail:
2959 kfree(table);
2960 return rc;
2961}
2962
2963static void efx_ef10_filter_table_restore(struct efx_nic *efx)
2964{
2965 struct efx_ef10_filter_table *table = efx->filter_state;
2966 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2967 struct efx_filter_spec *spec;
2968 unsigned int filter_idx;
2969 bool failed = false;
2970 int rc;
2971
2972 if (!nic_data->must_restore_filters)
2973 return;
2974
2975 spin_lock_bh(&efx->filter_lock);
2976
2977 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2978 spec = efx_ef10_filter_entry_spec(table, filter_idx);
2979 if (!spec)
2980 continue;
2981
2982 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2983 spin_unlock_bh(&efx->filter_lock);
2984
2985 rc = efx_ef10_filter_push(efx, spec,
2986 &table->entry[filter_idx].handle,
2987 false);
2988 if (rc)
2989 failed = true;
2990
2991 spin_lock_bh(&efx->filter_lock);
2992 if (rc) {
2993 kfree(spec);
2994 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2995 } else {
2996 table->entry[filter_idx].spec &=
2997 ~EFX_EF10_FILTER_FLAG_BUSY;
2998 }
2999 }
3000
3001 spin_unlock_bh(&efx->filter_lock);
3002
3003 if (failed)
3004 netif_err(efx, hw, efx->net_dev,
3005 "unable to restore all filters\n");
3006 else
3007 nic_data->must_restore_filters = false;
3008}
3009
3010static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3011{
3012 struct efx_ef10_filter_table *table = efx->filter_state;
3013 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3014 struct efx_filter_spec *spec;
3015 unsigned int filter_idx;
3016 int rc;
3017
3018 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3019 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3020 if (!spec)
3021 continue;
3022
3023 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3024 efx_ef10_filter_is_exclusive(spec) ?
3025 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3026 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3027 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3028 table->entry[filter_idx].handle);
3029 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3030 NULL, 0, NULL);
3031
3032 WARN_ON(rc != 0);
3033 kfree(spec);
3034 }
3035
3036 vfree(table->entry);
3037 kfree(table);
3038}
3039
3040static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3041{
3042 struct efx_ef10_filter_table *table = efx->filter_state;
3043 struct net_device *net_dev = efx->net_dev;
3044 struct efx_filter_spec spec;
3045 bool remove_failed = false;
3046 struct netdev_hw_addr *uc;
3047 struct netdev_hw_addr *mc;
3048 unsigned int filter_idx;
3049 int i, n, rc;
3050
3051 if (!efx_dev_registered(efx))
3052 return;
3053
3054 /* Mark old filters that may need to be removed */
3055 spin_lock_bh(&efx->filter_lock);
3056 n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
3057 for (i = 0; i < n; i++) {
3058 filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
3059 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
3060 }
3061 n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
3062 for (i = 0; i < n; i++) {
3063 filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
3064 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
3065 }
3066 spin_unlock_bh(&efx->filter_lock);
3067
3068 /* Copy/convert the address lists; add the primary station
3069 * address and broadcast address
3070 */
3071 netif_addr_lock_bh(net_dev);
3072 if (net_dev->flags & IFF_PROMISC ||
3073 netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
3074 table->stack_uc_count = -1;
3075 } else {
3076 table->stack_uc_count = 1 + netdev_uc_count(net_dev);
3077 memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
3078 ETH_ALEN);
3079 i = 1;
3080 netdev_for_each_uc_addr(uc, net_dev) {
3081 memcpy(table->stack_uc_list[i].addr,
3082 uc->addr, ETH_ALEN);
3083 i++;
3084 }
3085 }
3086 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
3087 netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
3088 table->stack_mc_count = -1;
3089 } else {
3090 table->stack_mc_count = 1 + netdev_mc_count(net_dev);
3091 eth_broadcast_addr(table->stack_mc_list[0].addr);
3092 i = 1;
3093 netdev_for_each_mc_addr(mc, net_dev) {
3094 memcpy(table->stack_mc_list[i].addr,
3095 mc->addr, ETH_ALEN);
3096 i++;
3097 }
3098 }
3099 netif_addr_unlock_bh(net_dev);
3100
3101 /* Insert/renew unicast filters */
3102 if (table->stack_uc_count >= 0) {
3103 for (i = 0; i < table->stack_uc_count; i++) {
3104 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
3105 EFX_FILTER_FLAG_RX_RSS |
3106 EFX_FILTER_FLAG_RX_STACK,
3107 0);
3108 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3109 table->stack_uc_list[i].addr);
3110 rc = efx_ef10_filter_insert(efx, &spec, true);
3111 if (rc < 0) {
3112 /* Fall back to unicast-promisc */
3113 while (i--)
3114 efx_ef10_filter_remove_safe(
3115 efx, EFX_FILTER_PRI_REQUIRED,
3116 table->stack_uc_list[i].id);
3117 table->stack_uc_count = -1;
3118 break;
3119 }
3120 table->stack_uc_list[i].id = rc;
3121 }
3122 }
3123 if (table->stack_uc_count < 0) {
3124 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
3125 EFX_FILTER_FLAG_RX_RSS |
3126 EFX_FILTER_FLAG_RX_STACK,
3127 0);
3128 efx_filter_set_uc_def(&spec);
3129 rc = efx_ef10_filter_insert(efx, &spec, true);
3130 if (rc < 0) {
3131 WARN_ON(1);
3132 table->stack_uc_count = 0;
3133 } else {
3134 table->stack_uc_list[0].id = rc;
3135 }
3136 }
3137
3138 /* Insert/renew multicast filters */
3139 if (table->stack_mc_count >= 0) {
3140 for (i = 0; i < table->stack_mc_count; i++) {
3141 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
3142 EFX_FILTER_FLAG_RX_RSS |
3143 EFX_FILTER_FLAG_RX_STACK,
3144 0);
3145 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3146 table->stack_mc_list[i].addr);
3147 rc = efx_ef10_filter_insert(efx, &spec, true);
3148 if (rc < 0) {
3149 /* Fall back to multicast-promisc */
3150 while (i--)
3151 efx_ef10_filter_remove_safe(
3152 efx, EFX_FILTER_PRI_REQUIRED,
3153 table->stack_mc_list[i].id);
3154 table->stack_mc_count = -1;
3155 break;
3156 }
3157 table->stack_mc_list[i].id = rc;
3158 }
3159 }
3160 if (table->stack_mc_count < 0) {
3161 efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
3162 EFX_FILTER_FLAG_RX_RSS |
3163 EFX_FILTER_FLAG_RX_STACK,
3164 0);
3165 efx_filter_set_mc_def(&spec);
3166 rc = efx_ef10_filter_insert(efx, &spec, true);
3167 if (rc < 0) {
3168 WARN_ON(1);
3169 table->stack_mc_count = 0;
3170 } else {
3171 table->stack_mc_list[0].id = rc;
3172 }
3173 }
3174
3175 /* Remove filters that weren't renewed. Since nothing else
3176 * changes the STACK_OLD flag or removes these filters, we
3177 * don't need to hold the filter_lock while scanning for
3178 * these filters.
3179 */
3180 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
3181 if (ACCESS_ONCE(table->entry[i].spec) &
3182 EFX_EF10_FILTER_FLAG_STACK_OLD) {
3183 if (efx_ef10_filter_remove_internal(efx,
3184 EFX_FILTER_PRI_REQUIRED,
3185 i, true) < 0)
3186 remove_failed = true;
3187 }
3188 }
3189 WARN_ON(remove_failed);
3190}
3191
3192static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
3193{
3194 efx_ef10_filter_sync_rx_mode(efx);
3195
3196 return efx_mcdi_set_mac(efx);
3197}
3198
Jon Cooper74cd60a2013-09-16 14:18:51 +01003199static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
3200{
3201 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
3202
3203 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
3204 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
3205 NULL, 0, NULL);
3206}
3207
3208/* MC BISTs follow a different poll mechanism to phy BISTs.
3209 * The BIST is done in the poll handler on the MC, and the MCDI command
3210 * will block until the BIST is done.
3211 */
3212static int efx_ef10_poll_bist(struct efx_nic *efx)
3213{
3214 int rc;
3215 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
3216 size_t outlen;
3217 u32 result;
3218
3219 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
3220 outbuf, sizeof(outbuf), &outlen);
3221 if (rc != 0)
3222 return rc;
3223
3224 if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
3225 return -EIO;
3226
3227 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
3228 switch (result) {
3229 case MC_CMD_POLL_BIST_PASSED:
3230 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
3231 return 0;
3232 case MC_CMD_POLL_BIST_TIMEOUT:
3233 netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
3234 return -EIO;
3235 case MC_CMD_POLL_BIST_FAILED:
3236 netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
3237 return -EIO;
3238 default:
3239 netif_err(efx, hw, efx->net_dev,
3240 "BIST returned unknown result %u", result);
3241 return -EIO;
3242 }
3243}
3244
3245static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
3246{
3247 int rc;
3248
3249 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
3250
3251 rc = efx_ef10_start_bist(efx, bist_type);
3252 if (rc != 0)
3253 return rc;
3254
3255 return efx_ef10_poll_bist(efx);
3256}
3257
3258static int
3259efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
3260{
3261 int rc, rc2;
3262
3263 efx_reset_down(efx, RESET_TYPE_WORLD);
3264
3265 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
3266 NULL, 0, NULL, 0, NULL);
3267 if (rc != 0)
3268 goto out;
3269
3270 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
3271 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
3272
3273 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
3274
3275out:
3276 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
3277 return rc ? rc : rc2;
3278}
3279
Ben Hutchings8127d662013-08-29 19:19:29 +01003280#ifdef CONFIG_SFC_MTD
3281
3282struct efx_ef10_nvram_type_info {
3283 u16 type, type_mask;
3284 u8 port;
3285 const char *name;
3286};
3287
3288static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
3289 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
3290 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
3291 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
3292 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
3293 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
3294 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
3295 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
3296 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
3297 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
3298 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
3299};
3300
3301static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
3302 struct efx_mcdi_mtd_partition *part,
3303 unsigned int type)
3304{
3305 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
3306 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
3307 const struct efx_ef10_nvram_type_info *info;
3308 size_t size, erase_size, outlen;
3309 bool protected;
3310 int rc;
3311
3312 for (info = efx_ef10_nvram_types; ; info++) {
3313 if (info ==
3314 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
3315 return -ENODEV;
3316 if ((type & ~info->type_mask) == info->type)
3317 break;
3318 }
3319 if (info->port != efx_port_num(efx))
3320 return -ENODEV;
3321
3322 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
3323 if (rc)
3324 return rc;
3325 if (protected)
3326 return -ENODEV; /* hide it */
3327
3328 part->nvram_type = type;
3329
3330 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
3331 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
3332 outbuf, sizeof(outbuf), &outlen);
3333 if (rc)
3334 return rc;
3335 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
3336 return -EIO;
3337 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
3338 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
3339 part->fw_subtype = MCDI_DWORD(outbuf,
3340 NVRAM_METADATA_OUT_SUBTYPE);
3341
3342 part->common.dev_type_name = "EF10 NVRAM manager";
3343 part->common.type_name = info->name;
3344
3345 part->common.mtd.type = MTD_NORFLASH;
3346 part->common.mtd.flags = MTD_CAP_NORFLASH;
3347 part->common.mtd.size = size;
3348 part->common.mtd.erasesize = erase_size;
3349
3350 return 0;
3351}
3352
3353static int efx_ef10_mtd_probe(struct efx_nic *efx)
3354{
3355 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
3356 struct efx_mcdi_mtd_partition *parts;
3357 size_t outlen, n_parts_total, i, n_parts;
3358 unsigned int type;
3359 int rc;
3360
3361 ASSERT_RTNL();
3362
3363 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
3364 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
3365 outbuf, sizeof(outbuf), &outlen);
3366 if (rc)
3367 return rc;
3368 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
3369 return -EIO;
3370
3371 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
3372 if (n_parts_total >
3373 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
3374 return -EIO;
3375
3376 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
3377 if (!parts)
3378 return -ENOMEM;
3379
3380 n_parts = 0;
3381 for (i = 0; i < n_parts_total; i++) {
3382 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
3383 i);
3384 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
3385 if (rc == 0)
3386 n_parts++;
3387 else if (rc != -ENODEV)
3388 goto fail;
3389 }
3390
3391 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
3392fail:
3393 if (rc)
3394 kfree(parts);
3395 return rc;
3396}
3397
3398#endif /* CONFIG_SFC_MTD */
3399
3400static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
3401{
3402 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
3403}
3404
3405const struct efx_nic_type efx_hunt_a0_nic_type = {
3406 .mem_map_size = efx_ef10_mem_map_size,
3407 .probe = efx_ef10_probe,
3408 .remove = efx_ef10_remove,
3409 .dimension_resources = efx_ef10_dimension_resources,
3410 .init = efx_ef10_init_nic,
3411 .fini = efx_port_dummy_op_void,
3412 .map_reset_reason = efx_mcdi_map_reset_reason,
3413 .map_reset_flags = efx_ef10_map_reset_flags,
3414 .reset = efx_mcdi_reset,
3415 .probe_port = efx_mcdi_port_probe,
3416 .remove_port = efx_mcdi_port_remove,
3417 .fini_dmaq = efx_ef10_fini_dmaq,
3418 .describe_stats = efx_ef10_describe_stats,
3419 .update_stats = efx_ef10_update_stats,
3420 .start_stats = efx_mcdi_mac_start_stats,
3421 .stop_stats = efx_mcdi_mac_stop_stats,
3422 .set_id_led = efx_mcdi_set_id_led,
3423 .push_irq_moderation = efx_ef10_push_irq_moderation,
3424 .reconfigure_mac = efx_ef10_mac_reconfigure,
3425 .check_mac_fault = efx_mcdi_mac_check_fault,
3426 .reconfigure_port = efx_mcdi_port_reconfigure,
3427 .get_wol = efx_ef10_get_wol,
3428 .set_wol = efx_ef10_set_wol,
3429 .resume_wol = efx_port_dummy_op_void,
Jon Cooper74cd60a2013-09-16 14:18:51 +01003430 .test_chip = efx_ef10_test_chip,
Ben Hutchings8127d662013-08-29 19:19:29 +01003431 .test_nvram = efx_mcdi_nvram_test_all,
3432 .mcdi_request = efx_ef10_mcdi_request,
3433 .mcdi_poll_response = efx_ef10_mcdi_poll_response,
3434 .mcdi_read_response = efx_ef10_mcdi_read_response,
3435 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
3436 .irq_enable_master = efx_port_dummy_op_void,
3437 .irq_test_generate = efx_ef10_irq_test_generate,
3438 .irq_disable_non_ev = efx_port_dummy_op_void,
3439 .irq_handle_msi = efx_ef10_msi_interrupt,
3440 .irq_handle_legacy = efx_ef10_legacy_interrupt,
3441 .tx_probe = efx_ef10_tx_probe,
3442 .tx_init = efx_ef10_tx_init,
3443 .tx_remove = efx_ef10_tx_remove,
3444 .tx_write = efx_ef10_tx_write,
3445 .rx_push_indir_table = efx_ef10_rx_push_indir_table,
3446 .rx_probe = efx_ef10_rx_probe,
3447 .rx_init = efx_ef10_rx_init,
3448 .rx_remove = efx_ef10_rx_remove,
3449 .rx_write = efx_ef10_rx_write,
3450 .rx_defer_refill = efx_ef10_rx_defer_refill,
3451 .ev_probe = efx_ef10_ev_probe,
3452 .ev_init = efx_ef10_ev_init,
3453 .ev_fini = efx_ef10_ev_fini,
3454 .ev_remove = efx_ef10_ev_remove,
3455 .ev_process = efx_ef10_ev_process,
3456 .ev_read_ack = efx_ef10_ev_read_ack,
3457 .ev_test_generate = efx_ef10_ev_test_generate,
3458 .filter_table_probe = efx_ef10_filter_table_probe,
3459 .filter_table_restore = efx_ef10_filter_table_restore,
3460 .filter_table_remove = efx_ef10_filter_table_remove,
3461 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
3462 .filter_insert = efx_ef10_filter_insert,
3463 .filter_remove_safe = efx_ef10_filter_remove_safe,
3464 .filter_get_safe = efx_ef10_filter_get_safe,
3465 .filter_clear_rx = efx_ef10_filter_clear_rx,
3466 .filter_count_rx_used = efx_ef10_filter_count_rx_used,
3467 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
3468 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
3469#ifdef CONFIG_RFS_ACCEL
3470 .filter_rfs_insert = efx_ef10_filter_rfs_insert,
3471 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
3472#endif
3473#ifdef CONFIG_SFC_MTD
3474 .mtd_probe = efx_ef10_mtd_probe,
3475 .mtd_rename = efx_mcdi_mtd_rename,
3476 .mtd_read = efx_mcdi_mtd_read,
3477 .mtd_erase = efx_mcdi_mtd_erase,
3478 .mtd_write = efx_mcdi_mtd_write,
3479 .mtd_sync = efx_mcdi_mtd_sync,
3480#endif
3481 .ptp_write_host_time = efx_ef10_ptp_write_host_time,
3482
3483 .revision = EFX_REV_HUNT_A0,
3484 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
3485 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
3486 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
3487 .can_rx_scatter = true,
3488 .always_rx_scatter = true,
3489 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3490 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
3491 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3492 NETIF_F_RXHASH | NETIF_F_NTUPLE),
3493 .mcdi_max_ver = 2,
3494 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
3495};