/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#ifdef CONFIG_RFS_ACCEL
struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
	__be16 eth_proto;
	u8 ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID	 0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuples to hold from aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u16 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	bool filter_op;
	bool used;
	struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_POLL_COUNT	100
#define QEDE_RFS_FLW_BITSHIFT	(4)
#define QEDE_RFS_FLW_MASK	((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
	struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

	/* lock for filter list access */
	spinlock_t arfs_list_lock;
	unsigned long *arfs_fltr_bmap;
	int filter_count;
	bool enable;
};

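/* Post an aRFS filter add/delete request to qed for @n on rx queue @rxq_id.
 * The request is skipped if one is already pending for this filter; the
 * outcome is reported asynchronously via qede_arfs_filter_op().
 */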
static void qede_configure_arfs_fltr(struct qede_dev *edev,
				     struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
	const struct qed_eth_ops *op = edev->ops;

	if (n->used)
		return;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
		   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
		   add_fltr ? "Adding" : "Deleting",
		   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
		   ntohs(n->tuple.dst_port), rxq_id);

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
				 rxq_id, add_fltr);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);
	clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
	kfree(fltr);
}

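/* Completion callback invoked by qed once an aRFS filter configuration
 * request finishes. On failure the filter is marked invalid; on success,
 * a pending rx-queue change is applied by re-issuing the configuration.
 */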
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
		struct hlist_node *temp;
		struct hlist_head *head;
		struct qede_arfs_fltr_node *fltr;

		head = &edev->arfs->arfs_hl_head[i];

		hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

			spin_lock_bh(&edev->arfs->arfs_list_lock);

			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
			     !fltr->used) || free_fltr) {
				hlist_del(&fltr->node);
				dma_unmap_single(&edev->pdev->dev,
						 fltr->mapping,
						 fltr->buf_len, DMA_TO_DEVICE);
				qede_free_arfs_filter(edev, fltr);
				edev->arfs->filter_count--;
			} else {
				if ((rps_may_expire_flow(edev->ndev,
							 fltr->rxq_id,
							 fltr->flow_id,
							 fltr->sw_id) || del) &&
				    !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}

			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	if (!edev->arfs->filter_count) {
		if (edev->arfs->enable) {
			edev->arfs->enable = false;
			edev->ops->configure_arfs_searcher(edev->cdev, false);
		}
	} else {
		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
		qede_process_arfs_filters(edev, false);

		if (!edev->arfs->filter_count)
			break;

		msleep(100);
		count--;
	}

	if (!count) {
		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}

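/* Allocate the aRFS bookkeeping: the filter hash table and its lock, the
 * CPU affinity reverse-map used for IRQ steering, and the sw_id bitmap
 * covering up to QEDE_RFS_MAX_FLTR filters.
 */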
int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

	spin_lock_init(&edev->arfs->arfs_list_lock);

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);

	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	if (!edev->ndev->rx_cpu_rmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

	edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
					     sizeof(long));
	if (!edev->arfs->arfs_fltr_bmap) {
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
		edev->ndev->rx_cpu_rmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

	return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

	if (edev->ndev->rx_cpu_rmap)
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

	edev->ndev->rx_cpu_rmap = NULL;
	vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}

static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
		else
			return false;
	} else {
		struct in6_addr *src = &tpos->tuple.src_ipv6;
		u8 size = sizeof(struct in6_addr);

		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
		else
			return false;
	}
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
	struct qede_arfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->tuple.ip_proto == ip_proto &&
		    tpos->tuple.eth_proto == skb->protocol &&
		    qede_compare_ip_addr(tpos, skb) &&
		    tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}

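/* Allocate a filter node along with a buffer of @min_hlen bytes for the
 * packet headers, and reserve a free sw_id from the filter bitmap.
 * Returns NULL if no filter slot or memory is available.
 */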
static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);

	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
	return n;
}

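/* .ndo_rx_flow_steer callback. Builds (or updates) an aRFS filter for the
 * TCP/UDP flow carried by @skb so that further packets of this flow are
 * steered to @rxq_index. Returns the filter's sw_id on success or a
 * negative errno; encapsulated and non-IP traffic is not supported.
 */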
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
		ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (__be16 *)(skb->data + tp_offset);
	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
				      skb, ports[0], ports[1], ip_proto);

	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
			if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
				n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

	min_hlen = ETH_HLEN + skb_headlen(skb);

	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

	n->buf_len = min_hlen;
	n->rxq_id = rxq_index;
	n->next_rxq_id = rxq_index;
	n->tuple.src_port = ports[0];
	n->tuple.dst_port = ports[1];
	n->flow_id = flow_id;

	if (skb->protocol == htons(ETH_P_IP)) {
		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	eth = (struct ethhdr *)n->data;
	eth->h_proto = skb->protocol;
	n->tuple.eth_proto = skb->protocol;
	n->tuple.ip_proto = ip_proto;
	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

	n->mapping = dma_map_single(&edev->pdev->dev, n->data,
				    n->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
		qede_free_arfs_filter(edev, n);
		rc = -ENOMEM;
		goto ret_unlock;
	}

	INIT_HLIST_NODE(&n->node);
	hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
	edev->arfs->filter_count++;

	if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
		edev->ops->configure_arfs_searcher(edev->cdev, true);
		edev->arfs->enable = true;
	}

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

	spin_unlock_bh(&edev->arfs->arfs_list_lock);

	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
#endif

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	/* MAC hints take effect only if we haven't set one already */
	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
		return;

	ether_addr_copy(edev->ndev->dev_addr, mac);
	ether_addr_copy(edev->primary_mac, mac);
}

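/* Fill @rss with the driver's cached RSS configuration, generating default
 * indirection, key and capability values where they have not been set yet
 * or where the indirection table references queues that no longer exist.
 * *update is cleared when only a single Rx queue is in use.
 */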
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update)
{
	bool need_reset = false;
	int i;

	if (QEDE_RSS_COUNT(edev) <= 1) {
		memset(rss, 0, sizeof(*rss));
		*update = 0;
		return;
	}

	/* Need to validate current RSS config uses valid entries */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
			need_reset = true;
			break;
		}
	}

	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			u16 indir_val, val;

			val = QEDE_RSS_COUNT(edev);
			indir_val = ethtool_rxfh_indir_default(i, val);
			edev->rss_ind_table[i] = indir_val;
		}
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	/* Now that we have the queue-indirection, prepare the handles */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
	}

	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}
	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
				 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
	}
	rss->rss_caps = edev->rss_caps;

	*update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

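/* Enable or disable accept-any-VLAN on vport 0 via a vport update.
 * Does nothing if the requested state is already in effect.
 */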
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params *params;
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (edev->accept_any_vlan == action)
		return 0;

	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->vport_id = 0;
	params->accept_any_vlan = action;
	params->update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);
	return 0;
}

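/* .ndo_vlan_rx_add_vid callback. Adds @vid to the driver's VLAN list and,
 * if the interface is up and a HW filter slot is available, programs a
 * VLAN filter; otherwise the device falls back to accept-any-VLAN mode.
 */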
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);
		goto out;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			goto out;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans) {
			rc = qede_config_accept_any_vlan(edev, true);
			if (rc) {
				kfree(vlan);
				goto out;
			}
		}

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

out:
	__qede_unlock(edev);
	return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}

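/* Walk the cached VLAN list and program HW filters for entries that are
 * not yet configured, enabling accept-any-VLAN when the filter quota is
 * exhausted and disabling it once every remaining VLAN has a filter.
 */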
int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */

	if (accept_any_vlan)
		rc = qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		rc = qede_config_accept_any_vlan(edev, false);

	if (rc && !real_rc)
		real_rc = rc;

	return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan = NULL;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	__qede_lock(edev);
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (!vlan || (vlan->vid != vid)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		goto out;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			goto out;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

out:
	__qede_unlock(edev);
	return rc;
}

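/* Mark every cached VLAN as non-configured, e.g. once the vport has been
 * torn down, so that qede_configure_vlan_filters() reprograms them when
 * the interface comes back up.
 */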
void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
				     struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;
}

int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	/* No action needed if hardware GRO is disabled during driver load */
	if (changes & NETIF_F_GRO) {
		if (dev->features & NETIF_F_GRO)
			need_reload = !edev->gro_disable;
		else
			need_reload = edev->gro_disable;
	}

	if (need_reload) {
		struct qede_reload_args args;

		args.u.features = features;
		args.func = &qede_set_features_reload;

		/* Make sure that we definitely need to reload.
		 * In case of an eBPF attached program, there will be no FW
		 * aggregations, so no need to actually reload.
		 */
		__qede_lock(edev);
		if (edev->xdp_prog)
			args.func(edev, &args);
		else
			qede_reload(edev, &args, true);
		__qede_unlock(edev);

		return 1;
	}

	return 0;
}

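/* .ndo_udp_tunnel_add callback. Programs the VXLAN or GENEVE UDP
 * destination port in the device via a tunnel-configuration request,
 * provided the relevant tunnel type is enabled and no port is set yet.
 */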
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);
	int rc;

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!edev->dev_info.common.vxlan_enable)
			return;

		if (edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->vxlan_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
				   t_port);
		} else {
			DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
				  t_port);
		}

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!edev->dev_info.common.geneve_enable)
			return;

		if (edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->geneve_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Added geneve port=%d\n", t_port);
		} else {
			DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
				  t_port);
		}

		break;
	default:
		return;
	}
}

void qede_udp_tunnel_del(struct net_device *dev,
			 struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (t_port != edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->vxlan_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
			   t_port);

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (t_port != edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->geneve_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
			   t_port);
		break;
	default:
		return;
	}
}

static void qede_xdp_reload_func(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	struct bpf_prog *old;

	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}

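/* Install @prog as the XDP program by reloading the datapath; the previous
 * program, if any, is released inside qede_xdp_reload_func().
 */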
static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	/* If we're called, there was already a bpf reference increment */
	args.func = &qede_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}

int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev)) {
		DP_NOTICE(edev, "VFs don't support XDP\n");
		return -EOPNOTSUPP;
	}

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return qede_xdp_set(edev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!edev->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC\n");
		return -EINVAL;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	edev->ops->common->update_mac(edev->cdev, addr->sa_data);

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}

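/* Rebuild the multicast MAC filter list from the netdev's mc list. If the
 * device is in all-multicast mode or more than 64 addresses are present,
 * fall back to multicast-promiscuous by updating *accept_flags instead.
 */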
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

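/* .ndo_set_rx_mode callback. Called in atomic context, so it only
 * schedules the slowpath task; the actual reconfiguration is done from
 * qede_config_rx_mode().
 */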
void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if (ndev->flags & IFF_PROMISC)
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	else
		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* Configure all filters regardless, in case promisc is rejected */
	if (uc_count < edev->dev_info.num_mac_filters) {
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}
	} else {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	}

	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
	if (rc)
		goto out;

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}