/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include <linux/qed/qed_if.h>
#include "qede.h"

#ifdef CONFIG_RFS_ACCEL
struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16  src_port;
	__be16  dst_port;
	__be16  eth_proto;
	u8      ip_proto;
};

struct qede_arfs_fltr_node {
#define QEDE_FLTR_VALID	 0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuples to hold from aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u16 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	bool filter_op;
	bool used;
	struct hlist_node node;
};

struct qede_arfs {
#define QEDE_ARFS_POLL_COUNT	100
#define QEDE_RFS_FLW_BITSHIFT	(4)
#define QEDE_RFS_FLW_MASK	((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
	struct hlist_head	arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];

	/* lock for filter list access */
	spinlock_t		arfs_list_lock;
	unsigned long		*arfs_fltr_bmap;
	int			filter_count;
	bool			enable;
};

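/* Post an add/delete request for an aRFS n-tuple filter to the qed core.
 * The request completes asynchronously; its status is reported back
 * through qede_arfs_filter_op().
 */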
static void qede_configure_arfs_fltr(struct qede_dev *edev,
				     struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
	const struct qed_eth_ops *op = edev->ops;

	if (n->used)
		return;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
		   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
		   add_fltr ? "Adding" : "Deleting",
		   n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
		   ntohs(n->tuple.dst_port), rxq_id);

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
				 rxq_id, add_fltr);
}

static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);
	clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
	kfree(fltr);
}

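/* Completion callback for an n-tuple filter add/delete request; fw_rc is
 * the firmware status. On success, if the target Rx queue changed while
 * the request was in flight, the filter is re-programmed accordingly.
 */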
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	if (fw_rc) {
		DP_NOTICE(edev,
			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
			  fw_rc, fltr->flow_id, fltr->sw_id,
			  ntohs(fltr->tuple.src_port),
			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);

		spin_lock_bh(&edev->arfs->arfs_list_lock);

		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);

		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

	if (fltr->filter_op) {
		set_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		if (fltr->rxq_id != fltr->next_rxq_id) {
			fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
		struct hlist_node *temp;
		struct hlist_head *head;
		struct qede_arfs_fltr_node *fltr;

		head = &edev->arfs->arfs_hl_head[i];

		hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

			spin_lock_bh(&edev->arfs->arfs_list_lock);

			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
			     !fltr->used) || free_fltr) {
				hlist_del(&fltr->node);
				dma_unmap_single(&edev->pdev->dev,
						 fltr->mapping,
						 fltr->buf_len, DMA_TO_DEVICE);
				qede_free_arfs_filter(edev, fltr);
				edev->arfs->filter_count--;
			} else {
				if ((rps_may_expire_flow(edev->ndev,
							 fltr->rxq_id,
							 fltr->flow_id,
							 fltr->sw_id) || del) &&
				    !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}

			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	if (!edev->arfs->filter_count) {
		if (edev->arfs->enable) {
			edev->arfs->enable = false;
			edev->ops->configure_arfs_searcher(edev->cdev, false);
		}
	} else {
		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}

/* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
		qede_process_arfs_filters(edev, false);

		if (!edev->arfs->filter_count)
			break;

		msleep(100);
		count--;
	}

	if (!count) {
		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");

		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}

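/* Allocate the per-device aRFS context: filter hash table, sw_id bitmap
 * and the IRQ CPU reverse-map used by the RFS core.
 */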
int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

	spin_lock_init(&edev->arfs->arfs_list_lock);

	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);

	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	if (!edev->ndev->rx_cpu_rmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

	edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
					     sizeof(long));
	if (!edev->arfs->arfs_fltr_bmap) {
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
		edev->ndev->rx_cpu_rmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

	return 0;
}

void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

	if (edev->ndev->rx_cpu_rmap)
		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

	edev->ndev->rx_cpu_rmap = NULL;
	vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}

static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
		else
			return false;
	} else {
		struct in6_addr *src = &tpos->tuple.src_ipv6;
		u8 size = sizeof(struct in6_addr);

		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
		else
			return false;
	}
}

static struct qede_arfs_fltr_node *
qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
	struct qede_arfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->tuple.ip_proto == ip_proto &&
		    tpos->tuple.eth_proto == skb->protocol &&
		    qede_compare_ip_addr(tpos, skb) &&
		    tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}

static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);

	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
	return n;
}

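/* .ndo_rx_flow_steer callback - steer a TCP/UDP over IPv4/IPv6 flow to the
 * Rx queue requested by the stack. Returns the filter's sw_id on success or
 * a negative error code.
 */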
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	if (skb->protocol != htons(ETH_P_IP) &&
	    skb->protocol != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
		ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return -EPROTONOSUPPORT;

	ports = (__be16 *)(skb->data + tp_offset);
	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
				      skb, ports[0], ports[1], ip_proto);

	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
			if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
				n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

	min_hlen = ETH_HLEN + skb_headlen(skb);

	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

	n->buf_len = min_hlen;
	n->rxq_id = rxq_index;
	n->next_rxq_id = rxq_index;
	n->tuple.src_port = ports[0];
	n->tuple.dst_port = ports[1];
	n->flow_id = flow_id;

	if (skb->protocol == htons(ETH_P_IP)) {
		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
		       sizeof(struct in6_addr));
		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

	eth = (struct ethhdr *)n->data;
	eth->h_proto = skb->protocol;
	n->tuple.eth_proto = skb->protocol;
	n->tuple.ip_proto = ip_proto;
	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));

	n->mapping = dma_map_single(&edev->pdev->dev, n->data,
				    n->buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
		DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
		qede_free_arfs_filter(edev, n);
		rc = -ENOMEM;
		goto ret_unlock;
	}

	INIT_HLIST_NODE(&n->node);
	hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
	edev->arfs->filter_count++;

	if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
		edev->ops->configure_arfs_searcher(edev->cdev, true);
		edev->arfs->enable = true;
	}

	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);

	spin_unlock_bh(&edev->arfs->arfs_list_lock);

	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
#endif

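/* Callback used by the qed core to report the UDP tunnel ports currently
 * programmed in the device; drop the cached port if it no longer matches.
 */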
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
{
	struct qede_dev *edev = dev;

	if (edev->vxlan_dst_port != vxlan_port)
		edev->vxlan_dst_port = 0;

	if (edev->geneve_dst_port != geneve_port)
		edev->geneve_dst_port = 0;
}

void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	/* MAC hints take effect only if we haven't set one already */
	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
		return;

	ether_addr_copy(edev->ndev->dev_addr, mac);
	ether_addr_copy(edev->primary_mac, mac);
}

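/* Build the RSS configuration (indirection table, hash key and capabilities)
 * for a vport update. *update is cleared when only a single Rx queue exists,
 * since RSS is meaningless in that case.
 */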
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update)
{
	bool need_reset = false;
	int i;

	if (QEDE_RSS_COUNT(edev) <= 1) {
		memset(rss, 0, sizeof(*rss));
		*update = 0;
		return;
	}

	/* Need to validate current RSS config uses valid entries */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
			need_reset = true;
			break;
		}
	}

	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			u16 indir_val, val;

			val = QEDE_RSS_COUNT(edev);
			indir_val = ethtool_rxfh_indir_default(i, val);
			edev->rss_ind_table[i] = indir_val;
		}
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	/* Now that we have the queue-indirection, prepare the handles */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);

		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
	}

	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}
	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));

	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
				 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
	}
	rss->rss_caps = edev->rss_caps;

	*update = 1;
}

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

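/* Toggle accept-any-VLAN on the vport; used when VLAN filter credits run out
 * or when promiscuous mode is entered or left.
 */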
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params *params;
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (edev->accept_any_vlan == action)
		return 0;

	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->vport_id = 0;
	params->accept_any_vlan = action;
	params->update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);
	return 0;
}

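/* .ndo_vlan_rx_add_vid callback - add a VLAN filter. The request is cached
 * if the interface is down, and accept-any-VLAN is enabled once the hardware
 * filter quota is exhausted.
 */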
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);
		goto out;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			goto out;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans) {
			rc = qede_config_accept_any_vlan(edev, true);
			if (rc) {
				kfree(vlan);
				goto out;
			}
		}

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

out:
	__qede_unlock(edev);
	return rc;
}

static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}

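/* Program any VLANs from the cached list that are not yet configured in
 * hardware, and adjust accept-any-VLAN according to the remaining credits.
 */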
int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */

	if (accept_any_vlan)
		rc = qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		rc = qede_config_accept_any_vlan(edev, false);

	if (rc && !real_rc)
		real_rc = rc;

	return real_rc;
}

int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan = NULL;
	int rc = 0;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	__qede_lock(edev);
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (!vlan || (vlan->vid != vid)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		goto out;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			goto out;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

out:
	__qede_unlock(edev);
	return rc;
}

void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}

static void qede_set_features_reload(struct qede_dev *edev,
				     struct qede_reload_args *args)
{
	edev->ndev->features = args->u.features;
}

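/* .ndo_set_features callback - only a hardware-GRO change requires a reload.
 * A positive return tells the core that dev->features was already updated by
 * the reload path (qede_set_features_reload).
 */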
int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	/* No action needed if hardware GRO is disabled during driver load */
	if (changes & NETIF_F_GRO) {
		if (dev->features & NETIF_F_GRO)
			need_reload = !edev->gro_disable;
		else
			need_reload = edev->gro_disable;
	}

	if (need_reload) {
		struct qede_reload_args args;

		args.u.features = features;
		args.func = &qede_set_features_reload;

		/* Make sure that we definitely need to reload.
		 * In case of an eBPF attached program, there will be no FW
		 * aggregations, so no need to actually reload.
		 */
		__qede_lock(edev);
		if (edev->xdp_prog)
			args.func(edev, &args);
		else
			qede_reload(edev, &args, true);
		__qede_unlock(edev);

		return 1;
	}

	return 0;
}

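/* .ndo_udp_tunnel_add callback - program a VXLAN/GENEVE UDP destination port,
 * provided the device supports the tunnel type and no port of that type is
 * already configured.
 */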
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);
	int rc;

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!edev->dev_info.common.vxlan_enable)
			return;

		if (edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->vxlan_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
				   t_port);
		} else {
			DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
				  t_port);
		}

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!edev->dev_info.common.geneve_enable)
			return;

		if (edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = t_port;

		__qede_lock(edev);
		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		if (!rc) {
			edev->geneve_dst_port = t_port;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Added geneve port=%d\n", t_port);
		} else {
			DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
				  t_port);
		}

		break;
	default:
		return;
	}
}

void qede_udp_tunnel_del(struct net_device *dev,
			 struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_tunn_params tunn_params;
	u16 t_port = ntohs(ti->port);

	memset(&tunn_params, 0, sizeof(tunn_params));

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (t_port != edev->vxlan_dst_port)
			return;

		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->vxlan_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
			   t_port);

		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (t_port != edev->geneve_dst_port)
			return;

		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = 0;

		__qede_lock(edev);
		edev->ops->tunn_config(edev->cdev, &tunn_params);
		__qede_unlock(edev);

		edev->geneve_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
			   t_port);
		break;
	default:
		return;
	}
}

static void qede_xdp_reload_func(struct qede_dev *edev,
				 struct qede_reload_args *args)
{
	struct bpf_prog *old;

	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}

static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	/* If we're called, there was already a bpf reference increment */
	args.func = &qede_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}

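/* .ndo_xdp callback - install or query an XDP program. Installation goes
 * through qede_reload() so the datapath is rebuilt around the new program;
 * XDP is not supported on VFs.
 */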
int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev)) {
		DP_NOTICE(edev, "VFs don't support XDP\n");
		return -EOPNOTSUPP;
	}

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return qede_xdp_set(edev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!edev->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

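/* .ndo_set_mac_address callback - validate the new address with the qed core
 * and, if the device is running, replace the primary unicast MAC filter.
 */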
int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC\n");
		return -EINVAL;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	edev->ops->common->update_mac(edev->cdev, addr->sa_data);

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}

static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

/* Must be called with qede_lock held */
void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if (ndev->flags & IFF_PROMISC)
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	else
		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* Configure all filters regardless, in case promisc is rejected */
	if (uc_count < edev->dev_info.num_mac_filters) {
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}
	} else {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	}

	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
	if (rc)
		goto out;

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}