/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000 and SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
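
/* Illustrative note (added for clarity, not in the upstream file): the
 * *_ORDER values encode the cache size as a power-of-two multiple of 8
 * entries, i.e. ENTRIES == 8 << ORDER. For example 8 << 1 == 16 for TX
 * and 8 << 3 == 64 for RX; efx_farch_init_common() below enforces this
 * relationship with BUILD_BUG_ON().
 */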

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
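
/* Worked example (added for clarity, not in the upstream file): a magic
 * event packs a driver-level code in the high bits and queue/channel
 * data in the low 8 bits. E.g. for channel 3,
 *	EFX_CHANNEL_MAGIC_TEST(channel) == (0x000101 << 8) | 3 == 0x010103
 * and _EFX_CHANNEL_MAGIC_CODE(0x010103) == 0x000101 recovers the code.
 * Note the low byte limits the encoded queue/channel number to 0-255.
 */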

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
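
/* Worked example (added for clarity, not in the upstream file): XOR
 * yields the bits that differ between a and b; ANDing with the mask
 * keeps only the differences we care about. E.g. with a == 0xff00,
 * b == 0xf0f0 and mask == 0x00ff, (a ^ b) & mask == 0x00f0, so the
 * owords compare unequal in the masked bits.
 */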

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}
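
/* Illustrative note (added for clarity, not in the upstream file): each
 * buffer-table entry describes one 4KB (EFX_BUF_SIZE) page, so the
 * descriptor stores the page frame number dma_addr >> 12. E.g. a buffer
 * page at DMA address 0x12345000 is programmed as FBUF address 0x12345.
 */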

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
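
/* Worked example (added for clarity, not in the upstream file): a
 * 512-entry descriptor ring of 8-byte qword descriptors needs 4096
 * bytes, which ALIGN() rounds to exactly one EFX_BUF_SIZE page, so
 * buffer->entries == 1; a 1024-entry ring would round to 8192 bytes and
 * consume two buffer IDs.
 */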

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
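
/* Illustrative note (added for clarity, not in the upstream file): ring
 * indices are free-running counters reduced modulo the power-of-two
 * ring size via ptr_mask. E.g. with a 512-entry ring (ptr_mask == 511),
 * write_count == 513 maps to slot 513 & 511 == 1. Pushing writes the
 * first descriptor together with the doorbell, saving the NIC an
 * initial descriptor fetch; notify writes the doorbell alone.
 */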


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
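
/* Illustrative note (added for clarity, not in the upstream file): the
 * DESCQ_SIZE field is encoded as log2 of the ring size, which __ffs()
 * extracts because the entry count is a power of two: e.g.
 * __ffs(512) == 9 and __ffs(4096) == 12.
 */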

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible). In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}
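
/* Illustrative note (added for clarity, not in the upstream file): the
 * waiter makes progress in two cases: every queue has drained
 * (active_queues == 0), or an RX flush slot has freed up, i.e. fewer
 * than EFX_RX_FLUSH_COUNT (4) flushes are in flight while at least one
 * queue still waits to be flushed.
 */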

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
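
/* Illustrative note (added for clarity, not in the upstream file): like
 * the descriptor rings, the event queue read pointer is a free-running
 * counter masked down to the ring size; e.g. with eventq_mask == 1023,
 * read pointer 1025 acknowledges entry 1025 & 1023 == 1 on the second
 * pass around the ring.
 */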

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
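
/* Worked example (added for clarity, not in the upstream file): the
 * completed-packet count is computed modulo the ring size, so it stays
 * correct across wrap-around. E.g. with ptr_mask == 511,
 * read_count == 510 and tx_ev_desc_ptr == 2 give
 * (2 - 510) & 511 == 4 descriptors completed.
 */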

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
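
/* Illustrative note (added for clarity, not in the upstream file): the
 * error-rate window works as follows: the counter restarts whenever the
 * previous window (EFX_INT_ERROR_EXPIRE seconds, i.e. one hour) has
 * expired, and the NIC is only disabled once EFX_MAX_INT_ERRORS (5)
 * fatal errors accumulate within one window; fewer than that just
 * schedules an INT_ERROR reset each time.
 */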

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		efx_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}
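
/* Illustrative example (added for clarity, not in the upstream file):
 * the ISR is a bitmask with one bit per interrupting event queue,
 * walked by shifting right one bit per channel; e.g. queues == 0x5
 * (0b0101) schedules channels 0 and 2.
 */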

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
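
/* Illustrative example (added for clarity, not in the upstream file;
 * the row count of 128 is an assumption about this architecture's value
 * of FR_BZ_RX_INDIRECTION_TBL_ROWS): each row names the RX queue for
 * one hash bucket, so with four RX queues a table filled as
 * rx_indir_table[i] = i % 4 spreads the hash space evenly across
 * queues 0-3.
 */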

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
1673 "Reducing VF count from from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
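
/* Worked example (added for clarity, not in the upstream file):
 * descriptor caches are carved from the top of SRAM downwards. With
 * vi_count == 32, the TX caches occupy the top 32 * 16 == 512 qwords
 * and the RX caches the 32 * 64 == 2048 qwords below them, so
 * rx_dc_base == sram_lim_qw - 2560.
 */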
1684
1685u32 efx_farch_fpga_ver(struct efx_nic *efx)
1686{
1687 efx_oword_t altera_build;
1688 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1689 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1690}
1691
1692void efx_farch_init_common(struct efx_nic *efx)
1693{
1694 efx_oword_t temp;
1695
1696 /* Set positions of descriptor caches in SRAM. */
1697 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1698 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1699 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1700 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1701
1702 /* Set TX descriptor cache size. */
1703 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1704 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1705 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1706
1707 /* Set RX descriptor cache size. Set low watermark to size-8, as
1708 * this allows most efficient prefetching.
1709 */
1710 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1711 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1712 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1713 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1714 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1715
1716 /* Program INT_KER address */
1717 EFX_POPULATE_OWORD_2(temp,
1718 FRF_AZ_NORM_INT_VEC_DIS_KER,
1719 EFX_INT_MODE_USE_MSI(efx),
1720 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1721 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1722
1723 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1724 /* Use an interrupt level unused by event queues */
1725 efx->irq_level = 0x1f;
1726 else
1727 /* Use a valid MSI-X vector */
1728 efx->irq_level = 0;
1729
1730 /* Enable all the genuinely fatal interrupts. (They are still
1731 * masked by the overall interrupt mask, controlled by
1732 * falcon_interrupts()).
1733 *
1734 * Note: All other fatal interrupts are enabled
1735 */
1736 EFX_POPULATE_OWORD_3(temp,
1737 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1738 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1739 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1740 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1741 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1742 EFX_INVERT_OWORD(temp);
1743 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1744
1745	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1746 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1747 */
1748 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1749 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1750 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1751 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1752 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1753 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1754 /* Enable SW_EV to inherit in char driver - assume harmless here */
1755 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1756 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1757 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1758 /* Disable hardware watchdog which can misfire */
1759 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1760 /* Squash TX of packets of 16 bytes or less */
1761 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1762 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1763 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1764
1765 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1766 EFX_POPULATE_OWORD_4(temp,
1767 /* Default values */
1768 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1769 FRF_BZ_TX_PACE_SB_AF, 0xb,
1770 FRF_BZ_TX_PACE_FB_BASE, 0,
1771 /* Allow large pace values in the
1772 * fast bin. */
1773 FRF_BZ_TX_PACE_BIN_TH,
1774 FFE_BZ_TX_PACE_RESERVED);
1775 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1776 }
1777}
1778
1779/**************************************************************************
1780 *
1781 * Filter tables
1782 *
1783 **************************************************************************
1784 */
1785
1786/* "Fudge factors" - difference between programmed value and actual depth.
1787 * Due to pipelined implementation we need to program H/W with a value that
1788 * is larger than the hop limit we want.
1789 */
1790#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1791#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1792
1793/* Hard maximum search limit. Hardware will time-out beyond 200-something.
1794 * We also need to avoid infinite loops in efx_farch_filter_search() when the
1795 * table is full.
1796 */
1797#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
1798
1799/* Don't try very hard to find space for performance hints, as this is
1800 * counter-productive. */
1801#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1802
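/* Worked example of the fudge factors above (illustrative): if the
 * deepest TCP wildcard filter inserted so far was found at search
 * depth 7, efx_farch_filter_push_rx_config() programs the hardware
 * search limit as 7 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD = 10.
 */
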
1803enum efx_farch_filter_type {
1804 EFX_FARCH_FILTER_TCP_FULL = 0,
1805 EFX_FARCH_FILTER_TCP_WILD,
1806 EFX_FARCH_FILTER_UDP_FULL,
1807 EFX_FARCH_FILTER_UDP_WILD,
1808 EFX_FARCH_FILTER_MAC_FULL = 4,
1809 EFX_FARCH_FILTER_MAC_WILD,
1810 EFX_FARCH_FILTER_UC_DEF = 8,
1811 EFX_FARCH_FILTER_MC_DEF,
1812 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
1813};
1814
1815enum efx_farch_filter_table_id {
1816 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
1817 EFX_FARCH_FILTER_TABLE_RX_MAC,
1818 EFX_FARCH_FILTER_TABLE_RX_DEF,
1819 EFX_FARCH_FILTER_TABLE_TX_MAC,
1820 EFX_FARCH_FILTER_TABLE_COUNT,
1821};
1822
1823enum efx_farch_filter_index {
1824 EFX_FARCH_FILTER_INDEX_UC_DEF,
1825 EFX_FARCH_FILTER_INDEX_MC_DEF,
1826 EFX_FARCH_FILTER_SIZE_RX_DEF,
1827};
1828
1829struct efx_farch_filter_spec {
1830 u8 type:4;
1831 u8 priority:4;
1832 u8 flags;
1833 u16 dmaq_id;
1834 u32 data[3];
1835};
1836
1837struct efx_farch_filter_table {
1838 enum efx_farch_filter_table_id id;
1839 u32 offset; /* address of table relative to BAR */
1840 unsigned size; /* number of entries */
1841 unsigned step; /* step between entries */
1842 unsigned used; /* number currently used */
1843 unsigned long *used_bitmap;
1844 struct efx_farch_filter_spec *spec;
1845 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
1846};
1847
1848struct efx_farch_filter_state {
1849 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
1850};
1851
1852static void
1853efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1854 struct efx_farch_filter_table *table,
1855 unsigned int filter_idx);
1856
1857/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1858 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
1859static u16 efx_farch_filter_hash(u32 key)
1860{
1861 u16 tmp;
1862
1863 /* First 16 rounds */
1864 tmp = 0x1fff ^ key >> 16;
1865 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1866 tmp = tmp ^ tmp >> 9;
1867 /* Last 16 rounds */
1868 tmp = tmp ^ tmp << 13 ^ key;
1869 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1870 return tmp ^ tmp >> 9;
1871}
1872
1873/* To allow for hash collisions, filter search continues at these
1874 * increments from the first possible entry selected by the hash. */
1875static u16 efx_farch_filter_increment(u32 key)
1876{
1877 return key * 2 - 1;
1878}
1879
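/* Illustrative only: the probe sequence that the search loops below
 * walk.  The increment is always odd (key * 2 - 1), hence coprime with
 * the power-of-two table size, so the sequence eventually visits every
 * slot.  The helper name is hypothetical.
 */
static inline unsigned int
example_probe_index(u32 key, unsigned int depth, unsigned int table_size)
{
	return (efx_farch_filter_hash(key) +
		depth * efx_farch_filter_increment(key)) & (table_size - 1);
}
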
1880static enum efx_farch_filter_table_id
1881efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
1882{
1883 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1884 (EFX_FARCH_FILTER_TCP_FULL >> 2));
1885 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1886 (EFX_FARCH_FILTER_TCP_WILD >> 2));
1887 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1888 (EFX_FARCH_FILTER_UDP_FULL >> 2));
1889 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1890 (EFX_FARCH_FILTER_UDP_WILD >> 2));
1891 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1892 (EFX_FARCH_FILTER_MAC_FULL >> 2));
1893 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1894 (EFX_FARCH_FILTER_MAC_WILD >> 2));
1895 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
1896 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
1897 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
1898}
1899
1900static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1901{
1902 struct efx_farch_filter_state *state = efx->filter_state;
1903 struct efx_farch_filter_table *table;
1904 efx_oword_t filter_ctl;
1905
1906 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1907
1908 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1909 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1910 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1911 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1912 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1913 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1914 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1915 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1916 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1917 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1918 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1919 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1920 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1921
1922 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1923 if (table->size) {
1924 EFX_SET_OWORD_FIELD(
1925 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1926 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1927 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1928 EFX_SET_OWORD_FIELD(
1929 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1930 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1931 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1932 }
1933
1934 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1935 if (table->size) {
1936 EFX_SET_OWORD_FIELD(
1937 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1938 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1939 EFX_SET_OWORD_FIELD(
1940 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1941 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1942 EFX_FILTER_FLAG_RX_RSS));
1943 EFX_SET_OWORD_FIELD(
1944 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1945 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1946 EFX_SET_OWORD_FIELD(
1947 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1948 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1949 EFX_FILTER_FLAG_RX_RSS));
1950
1951 /* There is a single bit to enable RX scatter for all
1952 * unmatched packets. Only set it if scatter is
1953 * enabled in both filter specs.
1954 */
1955 EFX_SET_OWORD_FIELD(
1956 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1957 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1958 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1959 EFX_FILTER_FLAG_RX_SCATTER));
1960 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1961 /* We don't expose 'default' filters because unmatched
1962 * packets always go to the queue number found in the
1963 * RSS table. But we still need to set the RX scatter
1964 * bit here.
1965 */
1966 EFX_SET_OWORD_FIELD(
1967 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1968 efx->rx_scatter);
1969 }
1970
1971 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1972}
1973
1974static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
1975{
1976 struct efx_farch_filter_state *state = efx->filter_state;
1977 struct efx_farch_filter_table *table;
1978 efx_oword_t tx_cfg;
1979
1980 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1981
1982 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
1983 if (table->size) {
1984 EFX_SET_OWORD_FIELD(
1985 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1986 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1987 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1988 EFX_SET_OWORD_FIELD(
1989 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1990 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1991 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1992 }
1993
1994 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
1995}
1996
1997static int
1998efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
1999 const struct efx_filter_spec *gen_spec)
2000{
2001 bool is_full = false;
2002
2003 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
2004 gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
2005 return -EINVAL;
2006
2007 spec->priority = gen_spec->priority;
2008 spec->flags = gen_spec->flags;
2009 spec->dmaq_id = gen_spec->dmaq_id;
2010
2011 switch (gen_spec->match_flags) {
2012 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2013 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
2014 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
2015 is_full = true;
2016 /* fall through */
2017 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2018 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2019 __be32 rhost, host1, host2;
2020 __be16 rport, port1, port2;
2021
2022 EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2023
2024 if (gen_spec->ether_type != htons(ETH_P_IP))
2025 return -EPROTONOSUPPORT;
2026 if (gen_spec->loc_port == 0 ||
2027 (is_full && gen_spec->rem_port == 0))
2028 return -EADDRNOTAVAIL;
2029 switch (gen_spec->ip_proto) {
2030 case IPPROTO_TCP:
2031 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
2032 EFX_FARCH_FILTER_TCP_WILD);
2033 break;
2034 case IPPROTO_UDP:
2035 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
2036 EFX_FARCH_FILTER_UDP_WILD);
2037 break;
2038 default:
2039 return -EPROTONOSUPPORT;
2040 }
2041
2042 /* Filter is constructed in terms of source and destination,
2043 * with the odd wrinkle that the ports are swapped in a UDP
2044 * wildcard filter. We need to convert from local and remote
2045 * (= zero for wildcard) addresses.
2046 */
2047 rhost = is_full ? gen_spec->rem_host[0] : 0;
2048 rport = is_full ? gen_spec->rem_port : 0;
2049 host1 = rhost;
2050 host2 = gen_spec->loc_host[0];
2051 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2052 port1 = gen_spec->loc_port;
2053 port2 = rport;
2054 } else {
2055 port1 = rport;
2056 port2 = gen_spec->loc_port;
2057 }
2058 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2059 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2060 spec->data[2] = ntohl(host2);
2061
2062 break;
2063 }
2064
2065 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
2066 is_full = true;
2067 /* fall through */
2068 case EFX_FILTER_MATCH_LOC_MAC:
2069 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2070 EFX_FARCH_FILTER_MAC_WILD);
2071 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2072 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2073 gen_spec->loc_mac[3] << 16 |
2074 gen_spec->loc_mac[4] << 8 |
2075 gen_spec->loc_mac[5]);
2076 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2077 gen_spec->loc_mac[1]);
2078 break;
2079
2080 case EFX_FILTER_MATCH_LOC_MAC_IG:
2081 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2082 EFX_FARCH_FILTER_MC_DEF :
2083 EFX_FARCH_FILTER_UC_DEF);
2084 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2085 break;
2086
2087 default:
2088 return -EPROTONOSUPPORT;
2089 }
2090
2091 return 0;
2092}
2093
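/* Worked example of the packing above (illustrative, not used by the
 * driver): a TCP full filter with remote 192.168.0.1:80 and local
 * 10.0.0.1:8080 gives host1 = 192.168.0.1, port1 = 80,
 * host2 = 10.0.0.1, port2 = 8080, and therefore
 *	data[0] = 0xc0a80001 << 16 | 80           = 0x00010050
 *	data[1] = 8080 << 16 | 0xc0a80001 >> 16   = 0x1f90c0a8
 *	data[2] = 0x0a000001
 * (all shifts on 32-bit values, so the high bits of host1 that fall
 * off data[0] reappear in data[1]).
 */
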
2094static void
2095efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2096 const struct efx_farch_filter_spec *spec)
2097{
2098 bool is_full = false;
2099
2100 /* *gen_spec should be completely initialised, to be consistent
2101 * with efx_filter_init_{rx,tx}() and in case we want to copy
2102 * it back to userland.
2103 */
2104 memset(gen_spec, 0, sizeof(*gen_spec));
2105
2106 gen_spec->priority = spec->priority;
2107 gen_spec->flags = spec->flags;
2108 gen_spec->dmaq_id = spec->dmaq_id;
2109
2110 switch (spec->type) {
2111 case EFX_FARCH_FILTER_TCP_FULL:
2112 case EFX_FARCH_FILTER_UDP_FULL:
2113 is_full = true;
2114 /* fall through */
2115 case EFX_FARCH_FILTER_TCP_WILD:
2116 case EFX_FARCH_FILTER_UDP_WILD: {
2117 __be32 host1, host2;
2118 __be16 port1, port2;
2119
2120 gen_spec->match_flags =
2121 EFX_FILTER_MATCH_ETHER_TYPE |
2122 EFX_FILTER_MATCH_IP_PROTO |
2123 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
2124 if (is_full)
2125 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
2126 EFX_FILTER_MATCH_REM_PORT);
2127 gen_spec->ether_type = htons(ETH_P_IP);
2128 gen_spec->ip_proto =
2129 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2130 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2131 IPPROTO_TCP : IPPROTO_UDP;
2132
2133 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2134 port1 = htons(spec->data[0]);
2135 host2 = htonl(spec->data[2]);
2136 port2 = htons(spec->data[1] >> 16);
2137 if (spec->flags & EFX_FILTER_FLAG_TX) {
2138 gen_spec->loc_host[0] = host1;
2139 gen_spec->rem_host[0] = host2;
2140 } else {
2141 gen_spec->loc_host[0] = host2;
2142 gen_spec->rem_host[0] = host1;
2143 }
2144 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2145 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2146 gen_spec->loc_port = port1;
2147 gen_spec->rem_port = port2;
2148 } else {
2149 gen_spec->loc_port = port2;
2150 gen_spec->rem_port = port1;
2151 }
2152
2153 break;
2154 }
2155
2156 case EFX_FARCH_FILTER_MAC_FULL:
2157 is_full = true;
2158 /* fall through */
2159 case EFX_FARCH_FILTER_MAC_WILD:
2160 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
2161 if (is_full)
2162 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2163 gen_spec->loc_mac[0] = spec->data[2] >> 8;
2164 gen_spec->loc_mac[1] = spec->data[2];
2165 gen_spec->loc_mac[2] = spec->data[1] >> 24;
2166 gen_spec->loc_mac[3] = spec->data[1] >> 16;
2167 gen_spec->loc_mac[4] = spec->data[1] >> 8;
2168 gen_spec->loc_mac[5] = spec->data[1];
2169 gen_spec->outer_vid = htons(spec->data[0]);
2170 break;
2171
2172 case EFX_FARCH_FILTER_UC_DEF:
2173 case EFX_FARCH_FILTER_MC_DEF:
2174 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2175 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2176 break;
2177
2178 default:
2179 WARN_ON(1);
2180 break;
2181 }
2182}
2183
2184static void
2185efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2186			      struct efx_farch_filter_spec *spec)
2187{
2188	/* If there's only one channel then disable RSS for non VF
2189	 * traffic, thereby allowing VFs to use RSS when the PF can't.
2190	 */
2191	spec->priority = EFX_FILTER_PRI_AUTO;
2192	spec->flags = (EFX_FILTER_FLAG_RX |
2193		       (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
2194		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2195	spec->dmaq_id = 0;
2196}
2197
2198/* Build a filter entry and return its n-tuple key. */
2199static u32 efx_farch_filter_build(efx_oword_t *filter,
2200 struct efx_farch_filter_spec *spec)
2201{
2202 u32 data3;
2203
2204 switch (efx_farch_filter_spec_table_id(spec)) {
2205 case EFX_FARCH_FILTER_TABLE_RX_IP: {
2206 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2207 spec->type == EFX_FARCH_FILTER_UDP_WILD);
2208 EFX_POPULATE_OWORD_7(
2209 *filter,
2210 FRF_BZ_RSS_EN,
2211 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2212 FRF_BZ_SCATTER_EN,
2213 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2214 FRF_BZ_TCP_UDP, is_udp,
2215 FRF_BZ_RXQ_ID, spec->dmaq_id,
2216 EFX_DWORD_2, spec->data[2],
2217 EFX_DWORD_1, spec->data[1],
2218 EFX_DWORD_0, spec->data[0]);
2219 data3 = is_udp;
2220 break;
2221 }
2222
2223 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2224 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2225 EFX_POPULATE_OWORD_7(
2226 *filter,
2227 FRF_CZ_RMFT_RSS_EN,
2228 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2229 FRF_CZ_RMFT_SCATTER_EN,
2230 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2231 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2232 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2233 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2234 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2235 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2236 data3 = is_wild;
2237 break;
2238 }
2239
2240 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2241 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2242 EFX_POPULATE_OWORD_5(*filter,
2243 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2244 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2245 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2246 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2247 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2248 data3 = is_wild | spec->dmaq_id << 1;
2249 break;
2250 }
2251
2252 default:
2253 BUG();
2254 }
2255
2256 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2257}
2258
2259static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2260 const struct efx_farch_filter_spec *right)
2261{
2262 if (left->type != right->type ||
2263 memcmp(left->data, right->data, sizeof(left->data)))
2264 return false;
2265
2266 if (left->flags & EFX_FILTER_FLAG_TX &&
2267 left->dmaq_id != right->dmaq_id)
2268 return false;
2269
2270 return true;
2271}
2272
2273/*
2274 * Construct/deconstruct external filter IDs. At least the RX filter
2275 * IDs must be ordered by matching priority, for RX NFC semantics.
2276 *
2277 * Deconstruction needs to be robust against invalid IDs so that
2278 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2279 * accept user-provided IDs.
2280 */
2281
2282#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
2283
2284static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2285 [EFX_FARCH_FILTER_TCP_FULL] = 0,
2286 [EFX_FARCH_FILTER_UDP_FULL] = 0,
2287 [EFX_FARCH_FILTER_TCP_WILD] = 1,
2288 [EFX_FARCH_FILTER_UDP_WILD] = 1,
2289 [EFX_FARCH_FILTER_MAC_FULL] = 2,
2290 [EFX_FARCH_FILTER_MAC_WILD] = 3,
2291 [EFX_FARCH_FILTER_UC_DEF] = 4,
2292 [EFX_FARCH_FILTER_MC_DEF] = 4,
2293};
2294
2295static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2296 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
2297 EFX_FARCH_FILTER_TABLE_RX_IP,
2298 EFX_FARCH_FILTER_TABLE_RX_MAC,
2299 EFX_FARCH_FILTER_TABLE_RX_MAC,
2300 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
2301 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
2302 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
2303};
2304
2305#define EFX_FARCH_FILTER_INDEX_WIDTH 13
2306#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
2307
2308static inline u32
2309efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
2310 unsigned int index)
2311{
2312 unsigned int range;
2313
2314 range = efx_farch_filter_type_match_pri[spec->type];
2315 if (!(spec->flags & EFX_FILTER_FLAG_RX))
2316 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2317
2318 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2319}
2320
2321static inline enum efx_farch_filter_table_id
2322efx_farch_filter_id_table_id(u32 id)
2323{
2324 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2325
2326 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2327 return efx_farch_filter_range_table[range];
2328 else
2329 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2330}
2331
2332static inline unsigned int efx_farch_filter_id_index(u32 id)
2333{
2334 return id & EFX_FARCH_FILTER_INDEX_MASK;
2335}
2336
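/* Worked example (illustrative): a TCP wildcard RX filter sitting at
 * table index 5 has match priority 1, so efx_farch_filter_make_id()
 * returns 1 << EFX_FARCH_FILTER_INDEX_WIDTH | 5 = 0x2005.  Decoding,
 * efx_farch_filter_id_table_id(0x2005) looks up range 1 and yields
 * EFX_FARCH_FILTER_TABLE_RX_IP, while efx_farch_filter_id_index(0x2005)
 * recovers index 5.
 */
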
2337u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2338{
2339 struct efx_farch_filter_state *state = efx->filter_state;
2340 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2341 enum efx_farch_filter_table_id table_id;
2342
2343 do {
2344 table_id = efx_farch_filter_range_table[range];
2345 if (state->table[table_id].size != 0)
2346 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2347 state->table[table_id].size;
2348 } while (range--);
2349
2350 return 0;
2351}
2352
2353s32 efx_farch_filter_insert(struct efx_nic *efx,
2354 struct efx_filter_spec *gen_spec,
2355 bool replace_equal)
2356{
2357 struct efx_farch_filter_state *state = efx->filter_state;
2358 struct efx_farch_filter_table *table;
2359 struct efx_farch_filter_spec spec;
2360 efx_oword_t filter;
2361 int rep_index, ins_index;
2362 unsigned int depth = 0;
2363 int rc;
2364
2365 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2366 if (rc)
2367 return rc;
2368
2369 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2370 if (table->size == 0)
2371 return -EINVAL;
2372
2373 netif_vdbg(efx, hw, efx->net_dev,
2374 "%s: type %d search_limit=%d", __func__, spec.type,
2375 table->search_limit[spec.type]);
2376
2377 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2378 /* One filter spec per type */
2379 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2380 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2381 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2382 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2383 ins_index = rep_index;
2384
2385 spin_lock_bh(&efx->filter_lock);
2386 } else {
2387 /* Search concurrently for
2388 * (1) a filter to be replaced (rep_index): any filter
2389 * with the same match values, up to the current
2390 * search depth for this type, and
2391 * (2) the insertion point (ins_index): (1) or any
2392 * free slot before it or up to the maximum search
2393 * depth for this priority
2394 * We fail if we cannot find (2).
2395 *
2396 * We can stop once either
2397 * (a) we find (1), in which case we have definitely
2398 * found (2) as well; or
2399 * (b) we have searched exhaustively for (1), and have
2400 * either found (2) or searched exhaustively for it
2401 */
2402 u32 key = efx_farch_filter_build(&filter, &spec);
2403 unsigned int hash = efx_farch_filter_hash(key);
2404 unsigned int incr = efx_farch_filter_increment(key);
2405 unsigned int max_rep_depth = table->search_limit[spec.type];
2406 unsigned int max_ins_depth =
2407 spec.priority <= EFX_FILTER_PRI_HINT ?
2408 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2409 EFX_FARCH_FILTER_CTL_SRCH_MAX;
2410 unsigned int i = hash & (table->size - 1);
2411
2412 ins_index = -1;
2413 depth = 1;
2414
2415 spin_lock_bh(&efx->filter_lock);
2416
2417 for (;;) {
2418 if (!test_bit(i, table->used_bitmap)) {
2419 if (ins_index < 0)
2420 ins_index = i;
2421 } else if (efx_farch_filter_equal(&spec,
2422 &table->spec[i])) {
2423 /* Case (a) */
2424 if (ins_index < 0)
2425 ins_index = i;
2426 rep_index = i;
2427 break;
2428 }
2429
2430 if (depth >= max_rep_depth &&
2431 (ins_index >= 0 || depth >= max_ins_depth)) {
2432 /* Case (b) */
2433 if (ins_index < 0) {
2434 rc = -EBUSY;
2435 goto out;
2436 }
2437 rep_index = -1;
2438 break;
2439 }
2440
2441 i = (i + incr) & (table->size - 1);
2442 ++depth;
2443 }
2444 }
2445
2446 /* If we found a filter to be replaced, check whether we
2447 * should do so
2448 */
2449 if (rep_index >= 0) {
2450 struct efx_farch_filter_spec *saved_spec =
2451 &table->spec[rep_index];
2452
2453 if (spec.priority == saved_spec->priority && !replace_equal) {
2454 rc = -EEXIST;
2455 goto out;
2456 }
2457		if (spec.priority < saved_spec->priority) {
2458			rc = -EPERM;
2459			goto out;
2460		}
2461		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
2462		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
2463			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
2464	}
2465
2466 /* Insert the filter */
2467 if (ins_index != rep_index) {
2468 __set_bit(ins_index, table->used_bitmap);
2469 ++table->used;
2470 }
2471 table->spec[ins_index] = spec;
2472
2473 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2474 efx_farch_filter_push_rx_config(efx);
2475 } else {
2476 if (table->search_limit[spec.type] < depth) {
2477 table->search_limit[spec.type] = depth;
2478 if (spec.flags & EFX_FILTER_FLAG_TX)
2479 efx_farch_filter_push_tx_limits(efx);
2480 else
2481 efx_farch_filter_push_rx_config(efx);
2482 }
2483
2484 efx_writeo(efx, &filter,
2485 table->offset + table->step * ins_index);
2486
2487 /* If we were able to replace a filter by inserting
2488 * at a lower depth, clear the replaced filter
2489 */
2490 if (ins_index != rep_index && rep_index >= 0)
2491 efx_farch_filter_table_clear_entry(efx, table,
2492 rep_index);
2493 }
2494
2495 netif_vdbg(efx, hw, efx->net_dev,
2496 "%s: filter type %d index %d rxq %u set",
2497 __func__, spec.type, ins_index, spec.dmaq_id);
2498 rc = efx_farch_filter_make_id(&spec, ins_index);
2499
2500out:
2501 spin_unlock_bh(&efx->filter_lock);
2502 return rc;
2503}
2504
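/* Usage sketch (hypothetical, not called from this file): steer TCP
 * traffic addressed to local ip:80 to RX queue 0 via a manual-priority
 * TCP wildcard filter.  Assumes the efx_filter_init_rx() and
 * efx_filter_set_ipv4_local() helpers from filter.h.
 */
static int example_steer_local_http(struct efx_nic *efx, __be32 ip)
{
	struct efx_filter_spec spec;
	s32 rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip, htons(80));
	if (rc)
		return rc;

	rc = efx_farch_filter_insert(efx, &spec, false);
	return rc < 0 ? rc : 0;	/* rc is the filter ID on success */
}
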
2505static void
2506efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2507 struct efx_farch_filter_table *table,
2508 unsigned int filter_idx)
2509{
2510 static efx_oword_t filter;
2511
2512	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2513	BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2514
2515 __clear_bit(filter_idx, table->used_bitmap);
2516 --table->used;
2517 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2518
2519 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2520
2521 /* If this filter required a greater search depth than
2522 * any other, the search limit for its type can now be
2523 * decreased. However, it is hard to determine that
2524 * unless the table has become completely empty - in
2525 * which case, all its search limits can be set to 0.
2526 */
2527 if (unlikely(table->used == 0)) {
2528 memset(table->search_limit, 0, sizeof(table->search_limit));
2529 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2530 efx_farch_filter_push_tx_limits(efx);
2531 else
2532 efx_farch_filter_push_rx_config(efx);
2533 }
2534}
2535
2536static int efx_farch_filter_remove(struct efx_nic *efx,
2537 struct efx_farch_filter_table *table,
2538 unsigned int filter_idx,
2539 enum efx_filter_priority priority)
2540{
2541 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2542
2543 if (!test_bit(filter_idx, table->used_bitmap) ||
2544	    spec->priority != priority)
2545		return -ENOENT;
2546
2547	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
2548		efx_farch_filter_init_rx_auto(efx, spec);
2549		efx_farch_filter_push_rx_config(efx);
2550	} else {
2551		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2552	}
2553
2554	return 0;
2555}
2556
2557int efx_farch_filter_remove_safe(struct efx_nic *efx,
2558 enum efx_filter_priority priority,
2559 u32 filter_id)
2560{
2561 struct efx_farch_filter_state *state = efx->filter_state;
2562 enum efx_farch_filter_table_id table_id;
2563 struct efx_farch_filter_table *table;
2564 unsigned int filter_idx;
2565 struct efx_farch_filter_spec *spec;
2566 int rc;
2567
2568 table_id = efx_farch_filter_id_table_id(filter_id);
2569 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2570 return -ENOENT;
2571 table = &state->table[table_id];
2572
2573 filter_idx = efx_farch_filter_id_index(filter_id);
2574 if (filter_idx >= table->size)
2575 return -ENOENT;
2576 spec = &table->spec[filter_idx];
2577
2578 spin_lock_bh(&efx->filter_lock);
2579	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2580	spin_unlock_bh(&efx->filter_lock);
2581
2582 return rc;
2583}
2584
2585int efx_farch_filter_get_safe(struct efx_nic *efx,
2586 enum efx_filter_priority priority,
2587 u32 filter_id, struct efx_filter_spec *spec_buf)
2588{
2589 struct efx_farch_filter_state *state = efx->filter_state;
2590 enum efx_farch_filter_table_id table_id;
2591 struct efx_farch_filter_table *table;
2592 struct efx_farch_filter_spec *spec;
2593 unsigned int filter_idx;
2594 int rc;
2595
2596 table_id = efx_farch_filter_id_table_id(filter_id);
2597 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2598 return -ENOENT;
2599 table = &state->table[table_id];
2600
2601 filter_idx = efx_farch_filter_id_index(filter_id);
2602 if (filter_idx >= table->size)
2603 return -ENOENT;
2604 spec = &table->spec[filter_idx];
2605
2606 spin_lock_bh(&efx->filter_lock);
2607
2608 if (test_bit(filter_idx, table->used_bitmap) &&
2609 spec->priority == priority) {
2610 efx_farch_filter_to_gen_spec(spec_buf, spec);
2611 rc = 0;
2612 } else {
2613 rc = -ENOENT;
2614 }
2615
2616 spin_unlock_bh(&efx->filter_lock);
2617
2618 return rc;
2619}
2620
2621static void
2622efx_farch_filter_table_clear(struct efx_nic *efx,
2623 enum efx_farch_filter_table_id table_id,
2624 enum efx_filter_priority priority)
2625{
2626 struct efx_farch_filter_state *state = efx->filter_state;
2627 struct efx_farch_filter_table *table = &state->table[table_id];
2628 unsigned int filter_idx;
2629
2630 spin_lock_bh(&efx->filter_lock);
2631	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2632		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
2633			efx_farch_filter_remove(efx, table,
2634						filter_idx, priority);
2635	}
2636	spin_unlock_bh(&efx->filter_lock);
2637}
2638
2639int efx_farch_filter_clear_rx(struct efx_nic *efx,
2640			      enum efx_filter_priority priority)
2641{
2642	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2643				     priority);
2644	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2645				     priority);
2646	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2647				     priority);
2648	return 0;
2649}
2650
2651u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2652 enum efx_filter_priority priority)
2653{
2654 struct efx_farch_filter_state *state = efx->filter_state;
2655 enum efx_farch_filter_table_id table_id;
2656 struct efx_farch_filter_table *table;
2657 unsigned int filter_idx;
2658 u32 count = 0;
2659
2660 spin_lock_bh(&efx->filter_lock);
2661
2662 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2663 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2664 table_id++) {
2665 table = &state->table[table_id];
2666 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2667 if (test_bit(filter_idx, table->used_bitmap) &&
2668 table->spec[filter_idx].priority == priority)
2669 ++count;
2670 }
2671 }
2672
2673 spin_unlock_bh(&efx->filter_lock);
2674
2675 return count;
2676}
2677
2678s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2679 enum efx_filter_priority priority,
2680 u32 *buf, u32 size)
2681{
2682 struct efx_farch_filter_state *state = efx->filter_state;
2683 enum efx_farch_filter_table_id table_id;
2684 struct efx_farch_filter_table *table;
2685 unsigned int filter_idx;
2686 s32 count = 0;
2687
2688 spin_lock_bh(&efx->filter_lock);
2689
2690 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2691 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2692 table_id++) {
2693 table = &state->table[table_id];
2694 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2695 if (test_bit(filter_idx, table->used_bitmap) &&
2696 table->spec[filter_idx].priority == priority) {
2697 if (count == size) {
2698 count = -EMSGSIZE;
2699 goto out;
2700 }
2701 buf[count++] = efx_farch_filter_make_id(
2702 &table->spec[filter_idx], filter_idx);
2703 }
2704 }
2705 }
2706out:
2707 spin_unlock_bh(&efx->filter_lock);
2708
2709 return count;
2710}
2711
2712/* Restore filter state after reset */
2713void efx_farch_filter_table_restore(struct efx_nic *efx)
2714{
2715 struct efx_farch_filter_state *state = efx->filter_state;
2716 enum efx_farch_filter_table_id table_id;
2717 struct efx_farch_filter_table *table;
2718 efx_oword_t filter;
2719 unsigned int filter_idx;
2720
2721 spin_lock_bh(&efx->filter_lock);
2722
2723 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2724 table = &state->table[table_id];
2725
2726 /* Check whether this is a regular register table */
2727 if (table->step == 0)
2728 continue;
2729
2730 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2731 if (!test_bit(filter_idx, table->used_bitmap))
2732 continue;
2733 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2734 efx_writeo(efx, &filter,
2735 table->offset + table->step * filter_idx);
2736 }
2737 }
2738
2739 efx_farch_filter_push_rx_config(efx);
2740 efx_farch_filter_push_tx_limits(efx);
2741
2742 spin_unlock_bh(&efx->filter_lock);
2743}
2744
2745void efx_farch_filter_table_remove(struct efx_nic *efx)
2746{
2747 struct efx_farch_filter_state *state = efx->filter_state;
2748 enum efx_farch_filter_table_id table_id;
2749
2750 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2751 kfree(state->table[table_id].used_bitmap);
2752 vfree(state->table[table_id].spec);
2753 }
2754 kfree(state);
2755}
2756
2757int efx_farch_filter_table_probe(struct efx_nic *efx)
2758{
2759 struct efx_farch_filter_state *state;
2760 struct efx_farch_filter_table *table;
2761 unsigned table_id;
2762
2763 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2764 if (!state)
2765 return -ENOMEM;
2766 efx->filter_state = state;
2767
2768 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2769 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2770 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2771 table->offset = FR_BZ_RX_FILTER_TBL0;
2772 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2773 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2774 }
2775
2776 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
2777 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2778 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2779 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2780 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2781 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2782
2783 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2784 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2785 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2786
2787 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2788 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2789 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2790 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2791 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
2792 }
2793
2794 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2795 table = &state->table[table_id];
2796 if (table->size == 0)
2797 continue;
2798 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2799 sizeof(unsigned long),
2800 GFP_KERNEL);
2801 if (!table->used_bitmap)
2802 goto fail;
2803 table->spec = vzalloc(table->size * sizeof(*table->spec));
2804 if (!table->spec)
2805 goto fail;
2806 }
2807
2808	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2809	if (table->size) {
2810		/* RX default filters must always exist */
2811		struct efx_farch_filter_spec *spec;
2812		unsigned i;
2813
2814		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
2815			spec = &table->spec[i];
2816			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
2817			efx_farch_filter_init_rx_auto(efx, spec);
2818			__set_bit(i, table->used_bitmap);
2819		}
2820	}
2821
2822 efx_farch_filter_push_rx_config(efx);
2823
2824 return 0;
2825
2826fail:
2827 efx_farch_filter_table_remove(efx);
2828 return -ENOMEM;
2829}
2830
2831/* Update scatter enable flags for filters pointing to our own RX queues */
2832void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2833{
2834 struct efx_farch_filter_state *state = efx->filter_state;
2835 enum efx_farch_filter_table_id table_id;
2836 struct efx_farch_filter_table *table;
2837 efx_oword_t filter;
2838 unsigned int filter_idx;
2839
2840 spin_lock_bh(&efx->filter_lock);
2841
2842 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2843 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2844 table_id++) {
2845 table = &state->table[table_id];
2846
2847 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2848 if (!test_bit(filter_idx, table->used_bitmap) ||
2849 table->spec[filter_idx].dmaq_id >=
2850 efx->n_rx_channels)
2851 continue;
2852
2853 if (efx->rx_scatter)
2854 table->spec[filter_idx].flags |=
2855 EFX_FILTER_FLAG_RX_SCATTER;
2856 else
2857 table->spec[filter_idx].flags &=
2858 ~EFX_FILTER_FLAG_RX_SCATTER;
2859
2860 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2861 /* Pushed by efx_farch_filter_push_rx_config() */
2862 continue;
2863
2864 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2865 efx_writeo(efx, &filter,
2866 table->offset + table->step * filter_idx);
2867 }
2868 }
2869
2870 efx_farch_filter_push_rx_config(efx);
2871
2872 spin_unlock_bh(&efx->filter_lock);
2873}
2874
2875#ifdef CONFIG_RFS_ACCEL
2876
2877s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
2878 struct efx_filter_spec *gen_spec)
2879{
2880 return efx_farch_filter_insert(efx, gen_spec, true);
2881}
2882
2883bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2884 unsigned int index)
2885{
2886 struct efx_farch_filter_state *state = efx->filter_state;
2887 struct efx_farch_filter_table *table =
2888 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2889
2890 if (test_bit(index, table->used_bitmap) &&
2891 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
2892 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2893 flow_id, index)) {
2894 efx_farch_filter_table_clear_entry(efx, table, index);
2895 return true;
2896 }
2897
2898 return false;
2899}
2900
2901#endif /* CONFIG_RFS_ACCEL */
2902
2903void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2904{
2905 struct net_device *net_dev = efx->net_dev;
2906 struct netdev_hw_addr *ha;
2907 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2908 u32 crc;
2909 int bit;
2910
2911 netif_addr_lock_bh(net_dev);
2912
2913 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2914
2915 /* Build multicast hash table */
2916 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2917 memset(mc_hash, 0xff, sizeof(*mc_hash));
2918 } else {
2919 memset(mc_hash, 0x00, sizeof(*mc_hash));
2920 netdev_for_each_mc_addr(ha, net_dev) {
2921 crc = ether_crc_le(ETH_ALEN, ha->addr);
2922 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2923 __set_bit_le(bit, mc_hash);
2924 }
2925
2926 /* Broadcast packets go through the multicast hash filter.
2927 * ether_crc_le() of the broadcast address is 0xbe2612ff
2928 * so we always add bit 0xff to the mask.
2929 */
2930 __set_bit_le(0xff, mc_hash);
2931 }
2932
2933 netif_addr_unlock_bh(net_dev);
2934}
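
/* Illustrative sketch: the hash-table bucket a given multicast address
 * lands in, matching the loop above.  EFX_MCAST_HASH_ENTRIES is the
 * size of the hardware multicast hash table; the helper name is
 * hypothetical.
 */
static inline int example_mc_hash_bit(const u8 *addr)
{
	return ether_crc_le(ETH_ALEN, addr) & (EFX_MCAST_HASH_ENTRIES - 1);
}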