/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

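/* The XDP RX path uses one page per frame, so the largest usable MTU is
 * whatever remains of a page after the Ethernet header, two VLAN tags
 * and the headroom reserved for XDP programs.
 */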
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
				   XDP_PACKET_HEADROOM))

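/* Map @up user priorities onto the TX rings: each UP gets a contiguous
 * block of num_tx_rings_p_up queues. up == 0 clears the traffic-class
 * configuration.
 */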
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	netdev_set_num_tc(dev, up);
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
	/* Partition Tx queues evenly amongst UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
				      MLX4_EN_NUM_UP_HIGH;
	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
				   new_prof.num_up;
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port for setup TC\n");
			goto out;
		}
	}

	err = mlx4_en_setup_tc(dev, tc);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}

static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
			      u32 chain_index, __be16 proto,
			      struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc);
}

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

/* Must not acquire state_lock, as its corresponding work_sync
 * is done under it.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

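/* Hash the 4-tuple into one of the driver's filter buckets: mix the
 * ports with the XOR of the two addresses and let hash_long() reduce
 * the result to MLX4_EN_FILTER_HASH_SHIFT bits.
 */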
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

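/* ndo_rx_flow_steer() callback, invoked by the RFS core in atomic
 * context to steer a flow toward the CPU where its consumer runs.
 * Parses the IPv4 TCP/UDP 4-tuple, reuses or allocates a filter, and
 * defers the actual HW flow attach to a workqueue since it may sleep.
 */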
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

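/* Expand a MAC address packed into the low 48 bits of @src_mac (byte 0
 * of the address in the most significant of those bits) into a byte
 * array, zeroing the two trailing pad bytes.
 */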
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

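/* Attach a unicast steering rule for @mac to the given QP. Under B0
 * steering this is a unicast GID attach; under device-managed flow
 * steering it is an Ethernet flow-spec attach, returning @reg_id for a
 * later detach.
 */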
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

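/* Register the port MAC and resolve the base RX QP number. With A0
 * steering the QP number is derived from the MAC table index; in the
 * other modes a dedicated QP range of size 1 is reserved instead.
 */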
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
				    MLX4_RES_USAGE_DRIVER);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

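/* Swap @prev_mac for @new_mac on @qpn: release the old steering rule
 * and MAC registration, rehash the mac_hash entry, then re-attach
 * unicast (and, if active, tunnel) steering for the new address.
 */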
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

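/* Diff the cached multicast list in @dst against the fresh one in
 * @src, tagging each entry MCLIST_REM, MCLIST_NONE or MCLIST_ADD so
 * the caller can detach/attach only what actually changed.
 */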
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		cq = priv->tx_cq[TX][i];
		napi_schedule(&cq->napi);
	}
}
#endif

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];

		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, tx_ring->qpn, tx_ring->sp_cqn,
			tx_ring->cons, tx_ring->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);
}

1388static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
1389{
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001390 struct mlx4_en_cq *cq;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001391 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001392
1393 /* If we haven't received a specific coalescing setting
Martin Olsson98a17082009-04-22 18:21:29 +02001394 * (module param), we set the moderation parameters as follows:
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001395 * - moder_cnt is set to the number of mtu sized packets to
Eric Dumazetecfd2ce2012-11-05 16:20:42 +00001396 * satisfy our coalescing target.
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001397 * - moder_time is set to a fixed value.
1398 */
Yevgeny Petrilin3db36fb2009-06-01 23:23:13 +00001399 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
Yevgeny Petrilin60b9f9e2008-12-25 18:19:47 -08001400 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
Yevgeny Petrilina19a8482012-04-23 02:18:33 +00001401 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
1402 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
Colin Ian King593814d2017-06-26 13:53:46 +01001403 en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
Yan Burman48e551f2013-02-07 02:25:21 +00001404 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001405
1406 /* Setup cq moderation params */
1407 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001408 cq = priv->rx_cq[i];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001409 cq->moder_cnt = priv->rx_frames;
1410 cq->moder_time = priv->rx_usecs;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001411 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
1412 priv->last_moder_packets[i] = 0;
1413 priv->last_moder_bytes[i] = 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001414 }
1415
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001416 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
1417 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1418 cq = priv->tx_cq[t][i];
1419 cq->moder_cnt = priv->tx_frames;
1420 cq->moder_time = priv->tx_usecs;
1421 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001422 }
1423
1424 /* Reset auto-moderation params */
1425 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
1426 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
1427 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
1428 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
1429 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
Yevgeny Petrilin60b9f9e2008-12-25 18:19:47 -08001430 priv->adaptive_rx_coal = 1;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001431 priv->last_moder_jiffies = 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001432 priv->last_moder_tx_packets = 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001433}
1434
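/* Adaptive RX coalescing: sample each RX ring's packet rate over the
 * elapsed period; when the rate lies between pkt_rate_low and
 * pkt_rate_high, linearly interpolate moder_time between rx_usecs_low
 * and rx_usecs_high, clamping to those bounds outside the window.
 * Low-rate or small-packet traffic falls back to rx_usecs_low, and the
 * CQ is only reprogrammed when the moderation values actually changed.
 */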
1435static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1436{
1437 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
Eric Dumazetf5a57722017-02-16 15:23:27 -08001438 u32 pkt_rate_high, pkt_rate_low;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001439 struct mlx4_en_cq *cq;
1440 unsigned long packets;
1441 unsigned long rate;
1442 unsigned long avg_pkt_size;
1443 unsigned long rx_packets;
1444 unsigned long rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001445 unsigned long rx_pkt_diff;
1446 int moder_time;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001447 int ring, err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001448
1449 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1450 return;
1451
Eric Dumazetf5a57722017-02-16 15:23:27 -08001452 pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
1453 pkt_rate_high = READ_ONCE(priv->pkt_rate_high);
1454
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001455 for (ring = 0; ring < priv->rx_ring_num; ring++) {
Eric Dumazetb9972d22016-11-23 09:46:52 -08001456 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
1457 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001458
Eric Dumazetf5a57722017-02-16 15:23:27 -08001459 rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001460 packets = rx_pkt_diff;
1461 rate = packets * HZ / period;
Eric Dumazetf5a57722017-02-16 15:23:27 -08001462 avg_pkt_size = packets ? (rx_bytes -
1463 priv->last_moder_bytes[ring]) / packets : 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001464
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001465		/* Apply auto-moderation only when the packet rate
1466		 * exceeds a rate at which it matters */
1467 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1468 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
Eric Dumazetf5a57722017-02-16 15:23:27 -08001469 if (rate <= pkt_rate_low)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001470 moder_time = priv->rx_usecs_low;
Eric Dumazetf5a57722017-02-16 15:23:27 -08001471 else if (rate >= pkt_rate_high)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001472 moder_time = priv->rx_usecs_high;
1473 else
Eric Dumazetf5a57722017-02-16 15:23:27 -08001474 moder_time = (rate - pkt_rate_low) *
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001475 (priv->rx_usecs_high - priv->rx_usecs_low) /
Eric Dumazetf5a57722017-02-16 15:23:27 -08001476 (pkt_rate_high - pkt_rate_low) +
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001477 priv->rx_usecs_low;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001478 } else {
1479 moder_time = priv->rx_usecs_low;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001480 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001481
Eric Dumazetf5a57722017-02-16 15:23:27 -08001482 cq = priv->rx_cq[ring];
1483 if (moder_time != priv->last_moder_time[ring] ||
1484 cq->moder_cnt != priv->rx_frames) {
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001485 priv->last_moder_time[ring] = moder_time;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001486 cq->moder_time = moder_time;
Sagi Grimberga1c66932013-06-04 05:13:26 +00001487 cq->moder_cnt = priv->rx_frames;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001488 err = mlx4_en_set_cq_moder(priv, cq);
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001489 if (err)
Yan Burman48e551f2013-02-07 02:25:21 +00001490 en_err(priv, "Failed modifying moderation for cq:%d\n",
1491 ring);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001492 }
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001493 priv->last_moder_packets[ring] = rx_packets;
1494 priv->last_moder_bytes[ring] = rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001495 }
1496
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001497 priv->last_moder_jiffies = jiffies;
1498}
1499
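/* Periodic stats work: while the port is up, dump the port counters
 * from FW, run one RX auto-moderation pass, and re-arm itself every
 * STATS_DELAY; it also re-programs the current MAC when the
 * corresponding mac_removed flag is set.
 */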
1500static void mlx4_en_do_get_stats(struct work_struct *work)
1501{
Jean Delvarebf6aede2009-04-02 16:56:54 -07001502 struct delayed_work *delay = to_delayed_work(work);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001503 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1504 stats_task);
1505 struct mlx4_en_dev *mdev = priv->mdev;
1506 int err;
1507
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001508 mutex_lock(&mdev->state_lock);
1509 if (mdev->device_up) {
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001510 if (priv->port_up) {
1511 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1512 if (err)
1513 en_dbg(HW, priv, "Could not update stats\n");
Eugenia Emantayev2d518372013-01-24 01:54:14 +00001514
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001515 mlx4_en_auto_moderation(priv);
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001516 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001517
1518 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1519 }
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001520 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
Noa Osherovich2695bab2014-07-08 11:25:24 +03001521 mlx4_en_do_set_mac(priv, priv->current_mac);
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001522 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1523 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001524 mutex_unlock(&mdev->state_lock);
1525}
1526
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001527/* mlx4_en_service_task - Run service task for tasks that need to be done
1528 * periodically
1529 */
1530static void mlx4_en_service_task(struct work_struct *work)
1531{
1532 struct delayed_work *delay = to_delayed_work(work);
1533 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1534 service_task);
1535 struct mlx4_en_dev *mdev = priv->mdev;
1536
1537 mutex_lock(&mdev->state_lock);
1538 if (mdev->device_up) {
Amir Vadaidc8142e2013-04-25 05:22:24 +00001539 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1540 mlx4_en_ptp_overflow_check(mdev);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001541
Ido Shamay07841f92015-04-30 17:32:46 +03001542 mlx4_en_recover_from_oom(priv);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001543 queue_delayed_work(mdev->workqueue, &priv->service_task,
1544 SERVICE_TASK_DELAY);
1545 }
1546 mutex_unlock(&mdev->state_lock);
1547}
1548
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001549static void mlx4_en_linkstate(struct work_struct *work)
1550{
1551 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1552 linkstate_task);
1553 struct mlx4_en_dev *mdev = priv->mdev;
1554 int linkstate = priv->link_state;
1555
1556 mutex_lock(&mdev->state_lock);
1557	/* If the observable port state changed, set the carrier state and
1558	 * report to the system log */
1559 if (priv->last_link_state != linkstate) {
1560 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001561 en_info(priv, "Link Down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001562 netif_carrier_off(priv->dev);
1563 } else {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001564 en_info(priv, "Link Up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001565 netif_carrier_on(priv->dev);
1566 }
1567 }
1568 priv->last_link_state = linkstate;
1569 mutex_unlock(&mdev->state_lock);
1570}
1571
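/* Give each RX ring an IRQ affinity hint on a CPU picked by
 * cpumask_local_spread(), preferring CPUs local to the device's NUMA
 * node so interrupt processing stays NUMA-local where possible.
 */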
Yuval Atias9e311e72014-06-09 10:24:39 +03001572static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1573{
1574 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1575 int numa_node = priv->mdev->dev->numa_node;
Yuval Atias9e311e72014-06-09 10:24:39 +03001576
1577 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1578 return -ENOMEM;
1579
Rusty Russellf36963c2015-05-09 03:14:13 +09301580 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
1581 ring->affinity_mask);
1582 return 0;
Yuval Atias9e311e72014-06-09 10:24:39 +03001583}
1584
1585static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1586{
1587 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1588}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001589
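/* XDP TX rings are paired 1:1 with RX rings: completed XDP_TX
 * descriptors return their pages to the RX ring of the same index
 * (via mlx4_en_recycle_tx_desc) instead of freeing them.
 */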
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001590static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
1591 int tx_ring_idx)
1592{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001593 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
1594 int rr_index = tx_ring_idx;
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001595
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001596 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
1597 tx_ring->recycle_ring = priv->rx_ring[rr_index];
1598 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
1599 TX_XDP, tx_ring_idx, rr_index);
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001600}
1601
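/* Bring the port up: activate RX CQs/rings, obtain the ETH QP, set up
 * RSS steering and the drop QP, activate all TX CQs/rings (regular and
 * XDP), configure and init the port in FW, attach the broadcast
 * address, and finally start the TX queues. Failures unwind in reverse
 * order through the error labels at the end of the function.
 */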
Yevgeny Petrilin18cc42a2008-12-29 18:39:20 -08001602int mlx4_en_start_port(struct net_device *dev)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001603{
1604 struct mlx4_en_priv *priv = netdev_priv(dev);
1605 struct mlx4_en_dev *mdev = priv->mdev;
1606 struct mlx4_en_cq *cq;
1607 struct mlx4_en_tx_ring *tx_ring;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001608 int rx_index = 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001609 int err = 0;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001610 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001611 int j;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001612 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001613
1614 if (priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001615 en_dbg(DRV, priv, "start port called while port already up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001616 return 0;
1617 }
1618
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001619 INIT_LIST_HEAD(&priv->mc_list);
1620 INIT_LIST_HEAD(&priv->curr_list);
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001621 INIT_LIST_HEAD(&priv->ethtool_list);
1622 memset(&priv->ethtool_rules[0], 0,
1623 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001624
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001625 /* Calculate Rx buf size */
1626 dev->mtu = min(dev->mtu, priv->max_mtu);
1627 mlx4_en_calc_rx_buf(dev);
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001628 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001629
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001630 /* Configure rx cq's and rings */
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001631 err = mlx4_en_activate_rx_rings(priv);
1632 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001633 en_err(priv, "Failed to activate RX rings\n");
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001634 return err;
1635 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001636 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001637 cq = priv->rx_cq[i];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001638
Yuval Atias9e311e72014-06-09 10:24:39 +03001639 err = mlx4_en_init_affinity_hint(priv, i);
1640 if (err) {
1641 en_err(priv, "Failed preparing IRQ affinity hint\n");
1642 goto cq_err;
1643 }
1644
Alexander Guller76532d02011-10-09 05:26:31 +00001645 err = mlx4_en_activate_cq(priv, cq, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001646 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001647 en_err(priv, "Failed activating Rx CQ\n");
Yuval Atias9e311e72014-06-09 10:24:39 +03001648 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilina4233302009-04-26 20:41:34 +00001649 goto cq_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001650 }
Ido Shamayc3f25112014-12-16 13:28:54 +02001651
1652 for (j = 0; j < cq->size; j++) {
1653 struct mlx4_cqe *cqe = NULL;
1654
1655 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1656 priv->cqe_factor;
1657 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1658 }
1659
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001660 err = mlx4_en_set_cq_moder(priv, cq);
1661 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001662 en_err(priv, "Failed setting cq moderation parameters\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001663 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001664 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001665 goto cq_err;
1666 }
1667 mlx4_en_arm_cq(priv, cq);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001668 priv->rx_ring[i]->cqn = cq->mcq.cqn;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001669 ++rx_index;
1670 }
1671
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001672 /* Set qp number */
1673 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
Yan Burman16a10ff2013-02-07 02:25:22 +00001674 err = mlx4_en_get_qp(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001675 if (err) {
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001676 en_err(priv, "Failed getting eth qp\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001677 goto cq_err;
1678 }
1679 mdev->mac_removed[priv->port] = 0;
1680
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03001681 priv->counter_index =
1682 mlx4_get_default_counter_index(mdev->dev, priv->port);
1683
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001684 err = mlx4_en_config_rss_steer(priv);
1685 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001686 en_err(priv, "Failed configuring rss steering\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001687 goto mac_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001688 }
1689
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001690 err = mlx4_en_create_drop_qp(priv);
1691 if (err)
1692 goto rss_err;
1693
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001694 /* Configure tx cq's and rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001695 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
Tariq Toukaneb9def62016-12-22 14:32:58 +02001696 u8 num_tx_rings_p_up = t == TX ?
1697 priv->num_tx_rings_p_up : priv->tx_ring_num[t];
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001698
1699 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1700 /* Configure cq */
1701 cq = priv->tx_cq[t][i];
1702 err = mlx4_en_activate_cq(priv, cq, i);
1703 if (err) {
1704 en_err(priv, "Failed allocating Tx CQ\n");
1705 goto tx_err;
1706 }
1707 err = mlx4_en_set_cq_moder(priv, cq);
1708 if (err) {
1709 en_err(priv, "Failed setting cq moderation parameters\n");
1710 mlx4_en_deactivate_cq(priv, cq);
1711 goto tx_err;
1712 }
1713 en_dbg(DRV, priv,
1714 "Resetting index of collapsed CQ:%d to -1\n", i);
1715 cq->buf->wqe_index = cpu_to_be16(0xffff);
1716
1717 /* Configure ring */
1718 tx_ring = priv->tx_ring[t][i];
1719 err = mlx4_en_activate_tx_ring(priv, tx_ring,
1720 cq->mcq.cqn,
1721 i / num_tx_rings_p_up);
1722 if (err) {
1723 en_err(priv, "Failed allocating Tx ring\n");
1724 mlx4_en_deactivate_cq(priv, cq);
1725 goto tx_err;
1726 }
1727 if (t != TX_XDP) {
1728 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1729 tx_ring->recycle_ring = NULL;
Tariq Toukan6c785112017-06-15 14:35:37 +03001730
1731 /* Arm CQ for TX completions */
1732 mlx4_en_arm_cq(priv, cq);
1733
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001734 } else {
1735 mlx4_en_init_recycle_ring(priv, i);
Tariq Toukan6c785112017-06-15 14:35:37 +03001736 /* XDP TX CQ should never be armed */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001737 }
1738
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001739 /* Set initial ownership of all Tx TXBBs to SW (1) */
1740 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1741 *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001742 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001743 }
1744
1745 /* Configure port */
1746 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1747 priv->rx_skb_size + ETH_FCS_LEN,
Yevgeny Petrilind53b93f2008-11-05 04:48:36 +00001748 priv->prof->tx_pause,
1749 priv->prof->tx_ppp,
1750 priv->prof->rx_pause,
1751 priv->prof->rx_ppp);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001752 if (err) {
Yan Burman48e551f2013-02-07 02:25:21 +00001753 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1754 priv->port, err);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001755 goto tx_err;
1756 }
Shaker Daibes40fb4fc2017-01-29 18:56:18 +02001757
1758 err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
1759 if (err) {
1760 en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
1761 dev->mtu, priv->port, err);
1762 goto tx_err;
1763 }
1764
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001765 /* Set default qp number */
1766 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1767 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001768 en_err(priv, "Failed setting default qp numbers\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001769 goto tx_err;
1770 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001771
Or Gerlitz837052d2013-12-23 16:09:44 +02001772 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02001773 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
Or Gerlitz837052d2013-12-23 16:09:44 +02001774 if (err) {
1775 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1776 err);
1777 goto tx_err;
1778 }
1779 }
1780
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001781 /* Init port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001782 en_dbg(HW, priv, "Initializing port\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001783 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1784 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001785 en_err(priv, "Failed Initializing port\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001786 goto tx_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001787 }
1788
Ido Shamayba4b87ae2015-10-08 17:14:01 +03001789 /* Set Unicast and VXLAN steering rules */
1790 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
1791 mlx4_en_set_rss_steer_rules(priv))
1792 mlx4_warn(mdev, "Failed setting steering rules\n");
1793
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001794	/* Attach rx QP to broadcast address */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001795 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001796 mc_list[5] = priv->port; /* needed for B0 steering support */
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001797 if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001798 priv->port, 0, MLX4_PROT_ETH,
1799 &priv->broadcast_id))
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001800 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1801
Herbert Xub5845f92011-03-27 01:01:26 +00001802 /* Must redo promiscuous mode setup. */
1803 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1804
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001805 /* Schedule multicast task to populate multicast list */
Yan Burman0eb74fd2013-02-07 02:25:23 +00001806 queue_work(mdev->workqueue, &priv->rx_mode_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001807
Or Gerlitz9737c6a2014-11-18 17:51:27 +02001808 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Alexander Duycka8312742016-06-16 12:22:30 -07001809 udp_tunnel_get_rx_info(dev);
1810
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001811 priv->port_up = true;
Erez Shitrit8d59de82016-10-27 16:27:17 +03001812
1813	/* Process any pending completions to prevent
1814	 * the queues from freezing if they are full
1815	 */
Eric Dumazet8cf699e2017-01-13 08:39:24 -08001816 for (i = 0; i < priv->rx_ring_num; i++) {
1817 local_bh_disable();
Erez Shitrit8d59de82016-10-27 16:27:17 +03001818 napi_schedule(&priv->rx_cq[i]->napi);
Eric Dumazet8cf699e2017-01-13 08:39:24 -08001819 local_bh_enable();
1820 }
Erez Shitrit8d59de82016-10-27 16:27:17 +03001821
Yevgeny Petrilina11faac2009-06-20 22:15:46 +00001822 netif_tx_start_all_queues(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001823 netif_device_attach(dev);
1824
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001825 return 0;
1826
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001827tx_err:
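	/* Unwind the TX resources in reverse: if the failure happened
	 * after the activation loop, step back to the last ring type,
	 * then deactivate each type's rings and CQs from i - 1 down to
	 * 0 before moving to the previous type.
	 */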
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001828 if (t == MLX4_EN_NUM_TX_TYPES) {
1829 t--;
1830 i = priv->tx_ring_num[t];
1831 }
1832 while (t >= 0) {
1833 while (i--) {
1834 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1835 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1836 }
1837 if (!t--)
1838 break;
1839 i = priv->tx_ring_num[t];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001840 }
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001841 mlx4_en_destroy_drop_qp(priv);
1842rss_err:
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001843 mlx4_en_release_rss_steer(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001844mac_err:
Yan Burman16a10ff2013-02-07 02:25:22 +00001845 mlx4_en_put_qp(priv);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001846cq_err:
Yuval Atias9e311e72014-06-09 10:24:39 +03001847 while (rx_index--) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001848 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
Benjamin Poirierf94813f2015-04-29 15:59:35 -07001849 mlx4_en_free_affinity_hint(priv, rx_index);
Yuval Atias9e311e72014-06-09 10:24:39 +03001850 }
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001851 for (i = 0; i < priv->rx_ring_num; i++)
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001852 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001853
1854 return err; /* need to close devices */
1855}
1856
1857
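/* Tear the port down in roughly the reverse order of
 * mlx4_en_start_port(): close the FW port, quiesce the TX path, drop
 * promiscuous/multicast and flow-steering state, deactivate and drain
 * the TX rings, then release RSS resources and the RX CQs/rings. With
 * @detach set the netdev is also detached from the stack.
 */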
Amir Vadai3484aac2013-01-30 23:07:11 +00001858void mlx4_en_stop_port(struct net_device *dev, int detach)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001859{
1860 struct mlx4_en_priv *priv = netdev_priv(dev);
1861 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001862 struct mlx4_en_mc_list *mclist, *tmp;
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001863 struct ethtool_flow_id *flow, *tmp_flow;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001864 int i, t;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001865 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001866
1867 if (!priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001868 en_dbg(DRV, priv, "stop port called while port already down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001869 return;
1870 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001871
Eugenia Emantayev0cc5c8b2013-06-25 12:09:33 +03001872	/* close port */
1873 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1874
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001875 /* Synchronize with tx routine */
1876 netif_tx_lock_bh(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001877 if (detach)
1878 netif_device_detach(dev);
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001879 netif_tx_stop_all_queues(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001880 netif_tx_unlock_bh(dev);
1881
Amir Vadai3484aac2013-01-30 23:07:11 +00001882 netif_tx_disable(dev);
1883
Eric Dumazet7f7bf162016-12-01 05:02:06 -08001884 spin_lock_bh(&priv->stats_lock);
1885 mlx4_en_fold_software_stats(dev);
Yevgeny Petrilin7c287382010-08-24 03:45:45 +00001886 /* Set port as not active */
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001887 priv->port_up = false;
Eric Dumazet7f7bf162016-12-01 05:02:06 -08001888 spin_unlock_bh(&priv->stats_lock);
1889
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03001890 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001891
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001892	/* Promiscuous mode */
1893 if (mdev->dev->caps.steering_mode ==
1894 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1895 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1896 MLX4_EN_FLAG_MC_PROMISC);
1897 mlx4_flow_steer_promisc_remove(mdev->dev,
1898 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001899 MLX4_FS_ALL_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001900 mlx4_flow_steer_promisc_remove(mdev->dev,
1901 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001902 MLX4_FS_MC_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001903 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1904 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1905
1906		/* Disable promiscuous mode */
1907 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1908 priv->port);
1909
1910 /* Disable Multicast promisc */
1911 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1912 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1913 priv->port);
1914 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1915 }
1916 }
1917
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001918	/* Detach all multicasts */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001919 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001920 mc_list[5] = priv->port; /* needed for B0 steering support */
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001921 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001922 MLX4_PROT_ETH, priv->broadcast_id);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001923 list_for_each_entry(mclist, &priv->curr_list, list) {
1924 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001925 mc_list[5] = priv->port;
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001926 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001927 mc_list, MLX4_PROT_ETH, mclist->reg_id);
Or Gerlitzde123262014-03-13 14:52:15 +02001928 if (mclist->tunnel_reg_id)
1929 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001930 }
1931 mlx4_en_clear_list(dev);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001932 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1933 list_del(&mclist->list);
1934 kfree(mclist);
1935 }
1936
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001937 /* Flush multicast filter */
1938 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1939
Hadar Hen Zion6efb5fa2013-03-21 05:55:53 +00001940	/* Remove flow steering rules for the port */
1941 if (mdev->dev->caps.steering_mode ==
1942 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1943 ASSERT_RTNL();
1944 list_for_each_entry_safe(flow, tmp_flow,
1945 &priv->ethtool_list, list) {
1946 mlx4_flow_detach(mdev->dev, flow->id);
1947 list_del(&flow->list);
1948 }
1949 }
1950
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001951 mlx4_en_destroy_drop_qp(priv);
1952
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001953 /* Free TX Rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001954 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
1955 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1956 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1957 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1958 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001959 }
1960 msleep(10);
1961
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001962 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
1963 for (i = 0; i < priv->tx_ring_num[t]; i++)
1964 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001965
Ido Shamayba4b87ae2015-10-08 17:14:01 +03001966 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1967 mlx4_en_delete_rss_steer_rules(priv);
1968
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001969 /* Free RSS qps */
1970 mlx4_en_release_rss_steer(priv);
1971
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001972	/* Unregister MAC address for the port */
Yan Burman16a10ff2013-02-07 02:25:22 +00001973 mlx4_en_put_qp(priv);
Or Gerlitz5930e8d2013-10-15 16:55:22 +02001974 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
Matan Barak955154f2013-01-30 23:07:10 +00001975 mdev->mac_removed[priv->port] = 1;
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001976
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001977 /* Free RX Rings */
1978 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001979 struct mlx4_en_cq *cq = priv->rx_cq[i];
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001980
Ido Shamayf4a36752014-10-27 11:37:45 +02001981 napi_synchronize(&cq->napi);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001982 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001983 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001984
1985 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001986 }
1987}
1988
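/* Watchdog work, queued e.g. from the TX timeout handler: restart the
 * port under RTNL and the mdev state lock so it cannot race open/close.
 */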
1989static void mlx4_en_restart(struct work_struct *work)
1990{
1991 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1992 watchdog_task);
1993 struct mlx4_en_dev *mdev = priv->mdev;
1994 struct net_device *dev = priv->dev;
1995
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001996 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001997
Hannes Frederic Sowa0c5c3252016-04-18 21:19:44 +02001998 rtnl_lock();
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001999 mutex_lock(&mdev->state_lock);
2000 if (priv->port_up) {
Amir Vadai3484aac2013-01-30 23:07:11 +00002001 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00002002 if (mlx4_en_start_port(dev))
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002003 en_err(priv, "Failed restarting port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00002004 }
2005 mutex_unlock(&mdev->state_lock);
Hannes Frederic Sowa0c5c3252016-04-18 21:19:44 +02002006 rtnl_unlock();
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002007}
2008
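/* Reset both the FW counters (DUMP_ETH_STATS with the reset flag, PF
 * only) and every software counter so a freshly opened port starts
 * from clean statistics.
 */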
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002009static void mlx4_en_clear_stats(struct net_device *dev)
2010{
2011 struct mlx4_en_priv *priv = netdev_priv(dev);
2012 struct mlx4_en_dev *mdev = priv->mdev;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002013 struct mlx4_en_tx_ring **tx_ring;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002014 int i;
2015
Tariq Toukaneb4b6782016-10-27 16:27:22 +03002016 if (!mlx4_is_slave(mdev->dev))
2017 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
2018 en_dbg(HW, priv, "Failed dumping statistics\n");
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002019
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002020 memset(&priv->pstats, 0, sizeof(priv->pstats));
2021 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
2022 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
Matan Barak0b131562015-03-30 17:45:25 +03002023 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
2024 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
2025 memset(&priv->rx_priority_flowstats, 0,
2026 sizeof(priv->rx_priority_flowstats));
2027 memset(&priv->tx_priority_flowstats, 0,
2028 sizeof(priv->tx_priority_flowstats));
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03002029 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002030
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002031 tx_ring = priv->tx_ring[TX];
2032 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
2033 tx_ring[i]->bytes = 0;
2034 tx_ring[i]->packets = 0;
2035 tx_ring[i]->tx_csum = 0;
2036 tx_ring[i]->tx_dropped = 0;
2037 tx_ring[i]->queue_stopped = 0;
2038 tx_ring[i]->wake_queue = 0;
2039 tx_ring[i]->tso_packets = 0;
2040 tx_ring[i]->xmit_more = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002041 }
2042 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002043 priv->rx_ring[i]->bytes = 0;
2044 priv->rx_ring[i]->packets = 0;
2045 priv->rx_ring[i]->csum_ok = 0;
2046 priv->rx_ring[i]->csum_none = 0;
Shani Michaelif8c64552014-11-09 13:51:53 +02002047 priv->rx_ring[i]->csum_complete = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002048 }
2049}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002050
2051static int mlx4_en_open(struct net_device *dev)
2052{
2053 struct mlx4_en_priv *priv = netdev_priv(dev);
2054 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002055 int err = 0;
2056
2057 mutex_lock(&mdev->state_lock);
2058
2059 if (!mdev->device_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002060 en_err(priv, "Cannot open - device down/disabled\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002061 err = -EBUSY;
2062 goto out;
2063 }
2064
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002065 /* Reset HW statistics and SW counters */
2066 mlx4_en_clear_stats(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002067
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002068 err = mlx4_en_start_port(dev);
2069 if (err)
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002070 en_err(priv, "Failed starting port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002071
2072out:
2073 mutex_unlock(&mdev->state_lock);
2074 return err;
2075}
2076
2077
2078static int mlx4_en_close(struct net_device *dev)
2079{
2080 struct mlx4_en_priv *priv = netdev_priv(dev);
2081 struct mlx4_en_dev *mdev = priv->mdev;
2082
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002083 en_dbg(IFDOWN, priv, "Close port called\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002084
2085 mutex_lock(&mdev->state_lock);
2086
Amir Vadai3484aac2013-01-30 23:07:11 +00002087 mlx4_en_stop_port(dev, 0);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002088 netif_carrier_off(dev);
2089
2090 mutex_unlock(&mdev->state_lock);
2091 return 0;
2092}
2093
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002094static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002095{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002096 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002097
Amir Vadai1eb8c692012-07-18 22:33:52 +00002098#ifdef CONFIG_RFS_ACCEL
Amir Vadai1eb8c692012-07-18 22:33:52 +00002099 priv->dev->rx_cpu_rmap = NULL;
2100#endif
2101
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002102 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2103 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2104 if (priv->tx_ring[t] && priv->tx_ring[t][i])
2105 mlx4_en_destroy_tx_ring(priv,
2106 &priv->tx_ring[t][i]);
2107 if (priv->tx_cq[t] && priv->tx_cq[t][i])
2108 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2109 }
Martin KaFai Lauf32b20e82017-01-31 22:35:32 -08002110 kfree(priv->tx_ring[t]);
2111 kfree(priv->tx_cq[t]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002112 }
2113
2114 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002115 if (priv->rx_ring[i])
Thadeu Lima de Souza Cascardo68355f72012-02-06 08:39:49 +00002116 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2117 priv->prof->rx_ring_size, priv->stride);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002118 if (priv->rx_cq[i])
Alexander Gullerfe0af032011-10-09 05:26:46 +00002119 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002120 }
Yevgeny Petrilin044ca2a2012-06-25 00:24:13 +00002121
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002122}
2123
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002124static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002125{
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002126 struct mlx4_en_port_profile *prof = priv->prof;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002127 int i, t;
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002128 int node;
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +00002129
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002130 /* Create tx Rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002131 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2132 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2133 node = cpu_to_node(i % num_online_cpus());
2134 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
2135 prof->tx_ring_size, i, t, node))
2136 goto err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002137
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002138 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
2139 prof->tx_ring_size,
2140 TXBB_SIZE, node, i))
2141 goto err;
2142 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002143 }
2144
2145 /* Create rx Rings */
2146 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002147 node = cpu_to_node(i % num_online_cpus());
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002148 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002149 prof->rx_ring_size, i, RX, node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002150 goto err;
2151
2152 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002153 prof->rx_ring_size, priv->stride,
2154 node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002155 goto err;
2156 }
2157
Amir Vadai1eb8c692012-07-18 22:33:52 +00002158#ifdef CONFIG_RFS_ACCEL
Matan Barakc66fa192015-05-31 09:30:16 +03002159 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
Amir Vadai1eb8c692012-07-18 22:33:52 +00002160#endif
2161
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002162 return 0;
2163
2164err:
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002165 en_err(priv, "Failed to allocate NIC resources\n");
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002166 for (i = 0; i < priv->rx_ring_num; i++) {
2167 if (priv->rx_ring[i])
2168 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2169 prof->rx_ring_size,
2170 priv->stride);
2171 if (priv->rx_cq[i])
2172 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2173 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002174 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2175 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2176 if (priv->tx_ring[t][i])
2177 mlx4_en_destroy_tx_ring(priv,
2178 &priv->tx_ring[t][i]);
2179 if (priv->tx_cq[t][i])
2180 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2181 }
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002182 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002183 return -ENOMEM;
2184}
2185
2186
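/* Copy the ring geometry of @src into the scratch priv @dst and
 * allocate fresh tx_ring/tx_cq pointer arrays per ring type; this is
 * the first half of the safe resource-replacement scheme below.
 */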
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002187static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
2188 struct mlx4_en_priv *src,
2189 struct mlx4_en_port_profile *prof)
2190{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002191 int t;
2192
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002193 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
2194 sizeof(dst->hwtstamp_config));
Inbar Karmyec327f72017-06-29 14:07:57 +03002195 dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002196 dst->rx_ring_num = prof->rx_ring_num;
2197 dst->flags = prof->flags;
2198 dst->mdev = src->mdev;
2199 dst->port = src->port;
2200 dst->dev = src->dev;
2201 dst->prof = prof;
2202 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2203 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2204
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002205 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2206 dst->tx_ring_num[t] = prof->tx_ring_num[t];
2207 if (!dst->tx_ring_num[t])
2208 continue;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002209
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002210 dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
2211 MAX_TX_RINGS, GFP_KERNEL);
2212 if (!dst->tx_ring[t])
2213 goto err_free_tx;
2214
2215 dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
2216 MAX_TX_RINGS, GFP_KERNEL);
2217 if (!dst->tx_cq[t]) {
2218 kfree(dst->tx_ring[t]);
2219 goto err_free_tx;
2220 }
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002221 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002222
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002223 return 0;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002224
2225err_free_tx:
2226 while (t--) {
2227 kfree(dst->tx_ring[t]);
2228 kfree(dst->tx_cq[t]);
2229 }
2230 return -ENOMEM;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002231}
2232
2233static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
2234 struct mlx4_en_priv *src)
2235{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002236 int t;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002237 memcpy(dst->rx_ring, src->rx_ring,
2238 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
2239 memcpy(dst->rx_cq, src->rx_cq,
2240 sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
2241 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
2242 sizeof(dst->hwtstamp_config));
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002243 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2244 dst->tx_ring_num[t] = src->tx_ring_num[t];
2245 dst->tx_ring[t] = src->tx_ring[t];
2246 dst->tx_cq[t] = src->tx_cq[t];
2247 }
Inbar Karmyec327f72017-06-29 14:07:57 +03002248 dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002249 dst->rx_ring_num = src->rx_ring_num;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002250 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
2251}
2252
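/* Two-phase reconfiguration: allocate the new rings into @tmp first so
 * that a failed allocation leaves the running configuration untouched;
 * mlx4_en_safe_replace_resources() later frees the old rings and moves
 * the new ones into @priv. When @carry_xdp_prog is set, the current
 * XDP program is re-attached to the new RX rings with its refcount
 * raised once per ring.
 */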
2253int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2254 struct mlx4_en_priv *tmp,
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002255 struct mlx4_en_port_profile *prof,
2256 bool carry_xdp_prog)
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002257{
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002258 struct bpf_prog *xdp_prog;
2259 int i, t;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002260
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002261 mlx4_en_copy_priv(tmp, priv, prof);
2262
2263 if (mlx4_en_alloc_resources(tmp)) {
2264 en_warn(priv,
2265 "%s: Resource allocation failed, using previous configuration\n",
2266 __func__);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002267 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2268 kfree(tmp->tx_ring[t]);
2269 kfree(tmp->tx_cq[t]);
2270 }
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002271 return -ENOMEM;
2272 }
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002273
2274	/* All rx_rings have the same xdp_prog. Pick the first one. */
2275 xdp_prog = rcu_dereference_protected(
2276 priv->rx_ring[0]->xdp_prog,
2277 lockdep_is_held(&priv->mdev->state_lock));
2278
2279 if (xdp_prog && carry_xdp_prog) {
2280 xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
2281 if (IS_ERR(xdp_prog)) {
2282 mlx4_en_free_resources(tmp);
2283 return PTR_ERR(xdp_prog);
2284 }
2285 for (i = 0; i < tmp->rx_ring_num; i++)
2286 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
2287 xdp_prog);
2288 }
2289
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002290 return 0;
2291}
2292
2293void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
2294 struct mlx4_en_priv *tmp)
2295{
2296 mlx4_en_free_resources(priv);
2297 mlx4_en_update_priv(priv, tmp);
2298}
2299
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002300void mlx4_en_destroy_netdev(struct net_device *dev)
2301{
2302 struct mlx4_en_priv *priv = netdev_priv(dev);
2303 struct mlx4_en_dev *mdev = priv->mdev;
2304
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002305 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002306
2307 /* Unregister device - this will close the port if it was up */
Jiri Pirko09d4d082016-02-26 17:32:24 +01002308 if (priv->registered) {
2309 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2310 priv->port));
Tariq Toukanb4353702016-11-27 19:20:51 +02002311 unregister_netdev(dev);
Jiri Pirko09d4d082016-02-26 17:32:24 +01002312 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002313
2314 if (priv->allocated)
2315 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2316
2317 cancel_delayed_work(&priv->stats_task);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00002318 cancel_delayed_work(&priv->service_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002319 /* flush any pending task for this netdev */
2320 flush_workqueue(mdev->workqueue);
2321
Eugenia Emantayev90683062015-12-17 15:35:38 +02002322 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2323 mlx4_en_remove_timestamp(mdev);
2324
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002325 /* Detach the netdev so tasks would not attempt to access it */
2326 mutex_lock(&mdev->state_lock);
2327 mdev->pndev[priv->port] = NULL;
Moni Shoua5da03542015-02-03 16:48:34 +02002328 mdev->upper[priv->port] = NULL;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002329
Eugenia Emantayev30f56e32016-07-18 18:35:11 +03002330#ifdef CONFIG_RFS_ACCEL
2331 mlx4_en_cleanup_filters(priv);
2332#endif
2333
Alexander Gullerfe0af032011-10-09 05:26:46 +00002334 mlx4_en_free_resources(priv);
Tariq Toukanb6e01232016-11-22 16:20:39 +02002335 mutex_unlock(&mdev->state_lock);
Amir Vadai564c2742012-04-04 21:33:26 +00002336
Tariq Toukanb4353702016-11-27 19:20:51 +02002337 free_netdev(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002338}
2339
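/* With an XDP program attached, each RX frame plus the XDP headroom
 * must fit in a single page, so the MTU is capped at
 * MLX4_EN_MAX_XDP_MTU while a program is loaded.
 */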
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002340static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
2341{
2342 struct mlx4_en_priv *priv = netdev_priv(dev);
2343
2344 if (mtu > MLX4_EN_MAX_XDP_MTU) {
2345 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
2346 mtu, MLX4_EN_MAX_XDP_MTU);
2347 return false;
2348 }
2349
2350 return true;
2351}
2352
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002353static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2354{
2355 struct mlx4_en_priv *priv = netdev_priv(dev);
2356 struct mlx4_en_dev *mdev = priv->mdev;
2357 int err = 0;
2358
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002359 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002360 dev->mtu, new_mtu);
2361
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002362 if (priv->tx_ring_num[TX_XDP] &&
2363 !mlx4_en_check_xdp_mtu(dev, new_mtu))
Martin KaFai Lau9f9b74e2017-01-10 09:41:49 -08002364 return -EOPNOTSUPP;
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002365
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002366 dev->mtu = new_mtu;
2367
2368 if (netif_running(dev)) {
2369 mutex_lock(&mdev->state_lock);
2370 if (!mdev->device_up) {
2371 /* NIC is probably restarting - let watchdog task reset
2372 * the port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002373 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002374 } else {
Amir Vadai3484aac2013-01-30 23:07:11 +00002375 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002376 err = mlx4_en_start_port(dev);
2377 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002378 en_err(priv, "Failed restarting port:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002379 priv->port);
2380 queue_work(mdev->workqueue, &priv->watchdog_task);
2381 }
2382 }
2383 mutex_unlock(&mdev->state_lock);
2384 }
2385 return 0;
2386}
2387
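/* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config, collapse
 * every supported RX filter to HWTSTAMP_FILTER_ALL, apply the result
 * via mlx4_en_reset_config() and copy the effective config back to
 * user space.
 */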
Ben Hutchings100dbda2013-11-18 23:13:31 +00002388static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
Amir Vadaiec693d42013-04-23 06:06:49 +00002389{
2390 struct mlx4_en_priv *priv = netdev_priv(dev);
2391 struct mlx4_en_dev *mdev = priv->mdev;
2392 struct hwtstamp_config config;
2393
2394 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2395 return -EFAULT;
2396
2397 /* reserved for future extensions */
2398 if (config.flags)
2399 return -EINVAL;
2400
2401 /* device doesn't support time stamping */
2402 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2403 return -EINVAL;
2404
2405 /* TX HW timestamp */
2406 switch (config.tx_type) {
2407 case HWTSTAMP_TX_OFF:
2408 case HWTSTAMP_TX_ON:
2409 break;
2410 default:
2411 return -ERANGE;
2412 }
2413
2414 /* RX HW timestamp */
2415 switch (config.rx_filter) {
2416 case HWTSTAMP_FILTER_NONE:
2417 break;
2418 case HWTSTAMP_FILTER_ALL:
2419 case HWTSTAMP_FILTER_SOME:
2420 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2421 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2422 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2423 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2424 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2425 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2426 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2427 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2428 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2429 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2430 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2431 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Miroslav Lichvare3412572017-05-19 17:52:36 +02002432 case HWTSTAMP_FILTER_NTP_ALL:
Amir Vadaiec693d42013-04-23 06:06:49 +00002433 config.rx_filter = HWTSTAMP_FILTER_ALL;
2434 break;
2435 default:
2436 return -ERANGE;
2437 }
2438
Saeed Mahameed7787fa62014-10-27 11:37:42 +02002439 if (mlx4_en_reset_config(dev, config, dev->features)) {
Amir Vadaiec693d42013-04-23 06:06:49 +00002440 config.tx_type = HWTSTAMP_TX_OFF;
2441 config.rx_filter = HWTSTAMP_FILTER_NONE;
2442 }
2443
2444 return copy_to_user(ifr->ifr_data, &config,
2445 sizeof(config)) ? -EFAULT : 0;
2446}
2447
Ben Hutchings100dbda2013-11-18 23:13:31 +00002448static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2449{
2450 struct mlx4_en_priv *priv = netdev_priv(dev);
2451
2452 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2453 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2454}
2455
Amir Vadaiec693d42013-04-23 06:06:49 +00002456static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2457{
2458 switch (cmd) {
2459 case SIOCSHWTSTAMP:
Ben Hutchings100dbda2013-11-18 23:13:31 +00002460 return mlx4_en_hwtstamp_set(dev, ifr);
2461 case SIOCGHWTSTAMP:
2462 return mlx4_en_hwtstamp_get(dev, ifr);
Amir Vadaiec693d42013-04-23 06:06:49 +00002463 default:
2464 return -EOPNOTSUPP;
2465 }
2466}
2467
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002468static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2469 netdev_features_t features)
2470{
2471 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2472 struct mlx4_en_dev *mdev = en_priv->mdev;
2473
2474	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
2475	 * enable/disable, make sure the S-TAG flag is always in the same
2476	 * state as C-TAG.
2477 */
2478 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2479 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2480 features |= NETIF_F_HW_VLAN_STAG_RX;
2481 else
2482 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2483
2484 return features;
2485}
2486
Amir Vadai60d6fe92011-11-26 19:55:19 +00002487static int mlx4_en_set_features(struct net_device *netdev,
2488 netdev_features_t features)
2489{
2490 struct mlx4_en_priv *priv = netdev_priv(netdev);
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002491 bool reset = false;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002492 int ret = 0;
2493
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002494 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2495 en_info(priv, "Turn %s RX-FCS\n",
2496 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2497 reset = true;
2498 }
2499
Muhammad Mahajna78500b82015-04-02 16:31:22 +03002500 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2501 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2502
2503 en_info(priv, "Turn %s RX-ALL\n",
2504 ignore_fcs_value ? "ON" : "OFF");
2505 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2506 priv->port, ignore_fcs_value);
2507 if (ret)
2508 return ret;
2509 }
2510
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002511 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2512 en_info(priv, "Turn %s RX vlan strip offload\n",
2513 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002514 reset = true;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002515 }
Amir Vadai60d6fe92011-11-26 19:55:19 +00002516
Ido Shamaycfb53f32015-02-03 17:57:21 +02002517 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2518 en_info(priv, "Turn %s TX vlan strip offload\n",
2519 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2520
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002521 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2522 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2523 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2524
Ido Shamay241a08c2015-04-02 16:31:07 +03002525 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2526 en_info(priv, "Turn %s loopback\n",
2527 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2528 mlx4_en_update_loopback_state(netdev, features);
2529 }
Yan Burman79aeacc2013-02-07 02:25:19 +00002530
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002531 if (reset) {
2532 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2533 features);
2534 if (ret)
2535 return ret;
2536 }
Amir Vadai60d6fe92011-11-26 19:55:19 +00002537
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002538 return 0;
Amir Vadai60d6fe92011-11-26 19:55:19 +00002539}
2540
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002541static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2542{
2543 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2544 struct mlx4_en_dev *mdev = en_priv->mdev;
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002545
Eugenia Emantayev745d8ae2017-02-23 12:02:42 +02002546 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002547}
2548
Moshe Shemesh79aab092016-09-22 12:11:15 +03002549static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
2550 __be16 vlan_proto)
Rony Efraim3f7fb022013-04-25 05:22:28 +00002551{
2552 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2553 struct mlx4_en_dev *mdev = en_priv->mdev;
2554
Moshe Shemeshb42959d2016-09-22 12:11:16 +03002555 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
2556 vlan_proto);
Rony Efraim3f7fb022013-04-25 05:22:28 +00002557}
2558
Ido Shamaycda373f2015-04-02 16:31:16 +03002559static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2560 int max_tx_rate)
2561{
2562 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2563 struct mlx4_en_dev *mdev = en_priv->mdev;
2564
2565 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2566 max_tx_rate);
2567}
2568
Rony Efraime6b6a232013-04-25 05:22:29 +00002569static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2570{
2571 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2572 struct mlx4_en_dev *mdev = en_priv->mdev;
2573
2574 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2575}
2576
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002577static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2578{
2579 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2580 struct mlx4_en_dev *mdev = en_priv->mdev;
2581
2582 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2583}
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002584
Rony Efraim948e3062013-06-13 13:19:11 +03002585static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2586{
2587 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2588 struct mlx4_en_dev *mdev = en_priv->mdev;
2589
2590 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2591}
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002592
Eran Ben Elisha62a89052015-06-15 17:59:08 +03002593static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2594 struct ifla_vf_stats *vf_stats)
2595{
2596 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2597 struct mlx4_en_dev *mdev = en_priv->mdev;
2598
2599 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2600}
2601
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002602#define PORT_ID_BYTE_LEN 8
2603static int mlx4_en_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01002604 struct netdev_phys_item_id *ppid)
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002605{
2606 struct mlx4_en_priv *priv = netdev_priv(dev);
2607 struct mlx4_dev *mdev = priv->mdev->dev;
2608 int i;
2609 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2610
2611 if (!phys_port_id)
2612 return -EOPNOTSUPP;
2613
2614 ppid->id_len = sizeof(phys_port_id);
2615 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2616 ppid->id[i] = phys_port_id & 0xff;
2617 phys_port_id >>= 8;
2618 }
2619 return 0;
2620}
2621
Or Gerlitz1b136de2014-03-27 14:02:04 +02002622static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2623{
2624 int ret;
2625 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2626 vxlan_add_task);
2627
2628 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2629 if (ret)
2630 goto out;
2631
2632 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2633 VXLAN_STEER_BY_OUTER_MAC, 1);
2634out:
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002635 if (ret) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02002636 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002637 return;
2638 }
2639
2640 /* set offloads */
Alexander Duyck09067122016-05-02 09:38:37 -07002641 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2642 NETIF_F_RXCSUM |
2643 NETIF_F_TSO | NETIF_F_TSO6 |
2644 NETIF_F_GSO_UDP_TUNNEL |
Alexander Duyck3c9346b2016-05-02 09:38:30 -07002645 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2646 NETIF_F_GSO_PARTIAL;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002647}
2648
2649static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2650{
2651 int ret;
2652 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2653 vxlan_del_task);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002654 /* unset offloads */
Alexander Duyck09067122016-05-02 09:38:37 -07002655 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2656 NETIF_F_RXCSUM |
2657 NETIF_F_TSO | NETIF_F_TSO6 |
2658 NETIF_F_GSO_UDP_TUNNEL |
Alexander Duyck3c9346b2016-05-02 09:38:30 -07002659 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2660 NETIF_F_GSO_PARTIAL);
Or Gerlitz1b136de2014-03-27 14:02:04 +02002661
2662 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2663 VXLAN_STEER_BY_OUTER_MAC, 0);
2664 if (ret)
2665 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2666
2667 priv->vxlan_port = 0;
2668}
2669
2670static void mlx4_en_add_vxlan_port(struct net_device *dev,
Alexander Duycka8312742016-06-16 12:22:30 -07002671 struct udp_tunnel_info *ti)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002672{
2673 struct mlx4_en_priv *priv = netdev_priv(dev);
Alexander Duycka8312742016-06-16 12:22:30 -07002674 __be16 port = ti->port;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002675 __be16 current_port;
2676
Alexander Duycka8312742016-06-16 12:22:30 -07002677 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002678 return;
2679
Alexander Duycka8312742016-06-16 12:22:30 -07002680 if (ti->sa_family != AF_INET)
2681 return;
2682
2683 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002684 return;
2685
2686 current_port = priv->vxlan_port;
2687 if (current_port && current_port != port) {
2688 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2689 ntohs(current_port), ntohs(port));
2690 return;
2691 }
2692
2693 priv->vxlan_port = port;
2694 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2695}
2696
2697static void mlx4_en_del_vxlan_port(struct net_device *dev,
Alexander Duycka8312742016-06-16 12:22:30 -07002698 struct udp_tunnel_info *ti)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002699{
2700 struct mlx4_en_priv *priv = netdev_priv(dev);
Alexander Duycka8312742016-06-16 12:22:30 -07002701 __be16 port = ti->port;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002702 __be16 current_port;
2703
Alexander Duycka8312742016-06-16 12:22:30 -07002704 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002705 return;
2706
Alexander Duycka8312742016-06-16 12:22:30 -07002707 if (ti->sa_family != AF_INET)
2708 return;
2709
2710 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002711 return;
2712
2713 current_port = priv->vxlan_port;
2714 if (current_port != port) {
2715 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2716 return;
2717 }
2718
2719 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2720}
Joe Stringer956bdab2014-11-13 16:38:14 -08002721
Jesse Gross5f352272014-12-23 22:37:26 -08002722static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2723 struct net_device *dev,
2724 netdev_features_t features)
Joe Stringer956bdab2014-11-13 16:38:14 -08002725{
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002726 features = vlan_features_check(skb, features);
Alexander Duyck09067122016-05-02 09:38:37 -07002727 features = vxlan_features_check(skb, features);
2728
2729	/* The ConnectX-3 doesn't support outer IPv6 checksums, but it does
2730	 * support inner IPv6 checksums and segmentation, so we need to
2731	 * strip those features if this is an IPv6 encapsulated frame.
2732	 */
2733 if (skb->encapsulation &&
Alexander Duycka5472242016-06-15 14:42:11 -07002734 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2735 struct mlx4_en_priv *priv = netdev_priv(dev);
2736
2737 if (!priv->vxlan_port ||
2738 (ip_hdr(skb)->version != 4) ||
2739 (udp_hdr(skb)->dest != priv->vxlan_port))
2740 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2741 }
Alexander Duyck09067122016-05-02 09:38:37 -07002742
2743 return features;
Joe Stringer956bdab2014-11-13 16:38:14 -08002744}
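/*
 * A minimal, self-contained sketch (not part of the driver) of the
 * demotion rule above, with the feature bits reduced to hypothetical
 * stand-ins: checksum/GSO offloads are kept only for an IPv4 VXLAN frame
 * addressed to the one UDP port the hardware was configured with.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_CSUM	0x1u	/* stand-in for NETIF_F_CSUM_MASK */
#define F_GSO	0x2u	/* stand-in for NETIF_F_GSO_MASK */

static unsigned int demote(unsigned int features, bool encapsulated,
			   int outer_ip_version, uint16_t udp_dest,
			   uint16_t vxlan_port)
{
	if (encapsulated &&
	    (!vxlan_port || outer_ip_version != 4 || udp_dest != vxlan_port))
		features &= ~(F_CSUM | F_GSO);	/* fall back to software */
	return features;
}

int main(void)
{
	/* outer IPv6 header: offloads are stripped (prints 0) */
	printf("%#x\n", demote(F_CSUM | F_GSO, true, 6, 4789, 4789));
	/* matching IPv4 VXLAN flow: offloads survive (prints 0x3) */
	printf("%#x\n", demote(F_CSUM | F_GSO, true, 4, 4789, 4789));
	return 0;
}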
Or Gerlitz1b136de2014-03-27 14:02:04 +02002745
Wu Fengguangde1cf8a2015-03-19 08:51:27 +08002746static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002747{
2748 struct mlx4_en_priv *priv = netdev_priv(dev);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002749 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002750 struct mlx4_update_qp_params params;
2751 int err;
2752
2753 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2754 return -EOPNOTSUPP;
2755
2756	/* rate is provided to us in Mb/s; check if it fits into 12 bits, if not use Gb/s */
2757 if (maxrate >> 12) {
2758 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2759 params.rate_val = maxrate / 1000;
2760 } else if (maxrate) {
2761 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2762 params.rate_val = maxrate;
2763 } else { /* zero serves to revoke the QP rate-limitation */
2764 params.rate_unit = 0;
2765 params.rate_val = 0;
2766 }
2767
2768 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2769 &params);
2770 return err;
2771}
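/*
 * A minimal, self-contained sketch (not part of the driver) of the unit
 * selection above: rate_val is a 12-bit field, so anything from 4096 Mb/s
 * up is expressed in Gb/s instead, and 0 revokes the limit. The RATE_*
 * constants are hypothetical stand-ins for MLX4_QP_RATE_LIMIT_*.
 */
#include <stdio.h>

enum { RATE_NONE = 0, RATE_GBS = 1, RATE_MBS = 2 };

static void pick_rate(unsigned int maxrate, unsigned int *unit,
		      unsigned int *val)
{
	if (maxrate >> 12) {		/* >= 4096: doesn't fit, use Gb/s */
		*unit = RATE_GBS;
		*val = maxrate / 1000;
	} else if (maxrate) {		/* fits in 12 bits: keep Mb/s */
		*unit = RATE_MBS;
		*val = maxrate;
	} else {			/* zero revokes the rate limit */
		*unit = RATE_NONE;
		*val = 0;
	}
}

int main(void)
{
	unsigned int unit, val;

	pick_rate(10000, &unit, &val);	/* 10000 Mb/s -> Gb/s unit, val 10 */
	printf("unit=%u val=%u\n", unit, val);
	return 0;
}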
2772
Brenden Blanco47a38e12016-07-19 12:16:50 -07002773static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2774{
2775 struct mlx4_en_priv *priv = netdev_priv(dev);
Brenden Blancod576acf2016-07-19 12:16:52 -07002776 struct mlx4_en_dev *mdev = priv->mdev;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002777 struct mlx4_en_port_profile new_prof;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002778 struct bpf_prog *old_prog;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002779 struct mlx4_en_priv *tmp;
2780 int tx_changed = 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002781 int xdp_ring_num;
Brenden Blancod576acf2016-07-19 12:16:52 -07002782 int port_up = 0;
2783 int err;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002784 int i;
2785
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002786 xdp_ring_num = prog ? priv->rx_ring_num : 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002787
Brenden Blancod576acf2016-07-19 12:16:52 -07002788 /* No need to reconfigure buffers when simply swapping the
2789 * program for a new one.
2790 */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002791 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
Brenden Blancod576acf2016-07-19 12:16:52 -07002792 if (prog) {
2793 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2794 if (IS_ERR(prog))
2795 return PTR_ERR(prog);
2796 }
Brenden Blanco326fe022016-09-03 21:29:58 -07002797 mutex_lock(&mdev->state_lock);
Brenden Blancod576acf2016-07-19 12:16:52 -07002798 for (i = 0; i < priv->rx_ring_num; i++) {
Brenden Blanco326fe022016-09-03 21:29:58 -07002799 old_prog = rcu_dereference_protected(
2800 priv->rx_ring[i]->xdp_prog,
2801 lockdep_is_held(&mdev->state_lock));
2802 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
Brenden Blancod576acf2016-07-19 12:16:52 -07002803 if (old_prog)
2804 bpf_prog_put(old_prog);
2805 }
Brenden Blanco326fe022016-09-03 21:29:58 -07002806 mutex_unlock(&mdev->state_lock);
Brenden Blancod576acf2016-07-19 12:16:52 -07002807 return 0;
2808 }
2809
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002810 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
Brenden Blanco47a38e12016-07-19 12:16:50 -07002811 return -EOPNOTSUPP;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002812
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002813 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2814 if (!tmp)
2815 return -ENOMEM;
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07002816
Brenden Blanco47a38e12016-07-19 12:16:50 -07002817 if (prog) {
2818 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002819 if (IS_ERR(prog)) {
2820 err = PTR_ERR(prog);
2821 goto out;
2822 }
Brenden Blanco47a38e12016-07-19 12:16:50 -07002823 }
2824
Brenden Blancod576acf2016-07-19 12:16:52 -07002825 mutex_lock(&mdev->state_lock);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002826 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
2827 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
2828
2829 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
2830 tx_changed = 1;
2831 new_prof.tx_ring_num[TX] =
Inbar Karmyf21ad612017-06-29 14:07:56 +03002832 MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002833		en_warn(priv, "Reducing the number of TX rings so as not to exceed the max total number of rings.\n");
2834 }
2835
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002836 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
Daniel Borkmannc5405942016-11-09 22:02:34 +01002837 if (err) {
2838 if (prog)
2839 bpf_prog_sub(prog, priv->rx_ring_num - 1);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002840 goto unlock_out;
Daniel Borkmannc5405942016-11-09 22:02:34 +01002841 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002842
Brenden Blancod576acf2016-07-19 12:16:52 -07002843 if (priv->port_up) {
2844 port_up = 1;
2845 mlx4_en_stop_port(dev, 1);
2846 }
2847
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002848 mlx4_en_safe_replace_resources(priv, tmp);
2849 if (tx_changed)
2850 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
Brenden Blanco47a38e12016-07-19 12:16:50 -07002851
Brenden Blanco47a38e12016-07-19 12:16:50 -07002852 for (i = 0; i < priv->rx_ring_num; i++) {
Brenden Blanco326fe022016-09-03 21:29:58 -07002853 old_prog = rcu_dereference_protected(
2854 priv->rx_ring[i]->xdp_prog,
2855 lockdep_is_held(&mdev->state_lock));
2856 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
Brenden Blanco47a38e12016-07-19 12:16:50 -07002857 if (old_prog)
2858 bpf_prog_put(old_prog);
2859 }
2860
Brenden Blancod576acf2016-07-19 12:16:52 -07002861 if (port_up) {
2862 err = mlx4_en_start_port(dev);
2863 if (err) {
2864 en_err(priv, "Failed starting port %d for XDP change\n",
2865 priv->port);
2866 queue_work(mdev->workqueue, &priv->watchdog_task);
2867 }
2868 }
2869
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002870unlock_out:
Brenden Blancod576acf2016-07-19 12:16:52 -07002871 mutex_unlock(&mdev->state_lock);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002872out:
2873 kfree(tmp);
2874 return err;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002875}
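/*
 * A minimal, self-contained sketch (not part of the driver) of the ring
 * budget arithmetic above: attaching a program dedicates one XDP TX ring
 * per RX ring, and the regular TX rings are shrunk if the combined total
 * would exceed MAX_TX_RINGS. The sizes used here are hypothetical.
 */
#include <stdio.h>

#define MAX_TX_RINGS	128
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static int tx_rings_after_xdp(int tx_rings, int rx_rings, int num_up)
{
	int xdp_rings = rx_rings;	/* one XDP TX ring per RX ring */

	if (tx_rings + xdp_rings > MAX_TX_RINGS)
		tx_rings = MAX_TX_RINGS - ALIGN_UP(xdp_rings, num_up);
	return tx_rings;
}

int main(void)
{
	/* 96 TX + 48 RX rings, 8 UPs: regular TX shrinks to 128 - 48 = 80 */
	printf("%d\n", tx_rings_after_xdp(96, 48, 8));
	return 0;
}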
2876
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002877static u32 mlx4_xdp_query(struct net_device *dev)
Brenden Blanco47a38e12016-07-19 12:16:50 -07002878{
2879 struct mlx4_en_priv *priv = netdev_priv(dev);
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002880 struct mlx4_en_dev *mdev = priv->mdev;
2881 const struct bpf_prog *xdp_prog;
2882 u32 prog_id = 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002883
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002884 if (!priv->tx_ring_num[TX_XDP])
2885 return prog_id;
2886
2887 mutex_lock(&mdev->state_lock);
2888 xdp_prog = rcu_dereference_protected(
2889 priv->rx_ring[0]->xdp_prog,
2890 lockdep_is_held(&mdev->state_lock));
2891 if (xdp_prog)
2892 prog_id = xdp_prog->aux->id;
2893 mutex_unlock(&mdev->state_lock);
2894
2895 return prog_id;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002896}
2897
2898static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
2899{
2900 switch (xdp->command) {
2901 case XDP_SETUP_PROG:
2902 return mlx4_xdp_set(dev, xdp->prog);
2903 case XDP_QUERY_PROG:
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002904 xdp->prog_id = mlx4_xdp_query(dev);
2905 xdp->prog_attached = !!xdp->prog_id;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002906 return 0;
2907 default:
2908 return -EINVAL;
2909 }
2910}
2911
Stephen Hemminger3addc562008-11-21 17:30:58 -08002912static const struct net_device_ops mlx4_netdev_ops = {
2913 .ndo_open = mlx4_en_open,
2914 .ndo_stop = mlx4_en_close,
2915 .ndo_start_xmit = mlx4_en_xmit,
Yevgeny Petrilinf813cad2009-06-01 23:24:07 +00002916 .ndo_select_queue = mlx4_en_select_queue,
Eric Dumazet9ed17db172016-05-25 09:50:38 -07002917 .ndo_get_stats64 = mlx4_en_get_stats64,
Yan Burman0eb74fd2013-02-07 02:25:23 +00002918 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002919 .ndo_set_mac_address = mlx4_en_set_mac,
Stephen Hemminger52255bb2009-01-09 10:45:37 +00002920 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002921 .ndo_change_mtu = mlx4_en_change_mtu,
Amir Vadaiec693d42013-04-23 06:06:49 +00002922 .ndo_do_ioctl = mlx4_en_ioctl,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002923 .ndo_tx_timeout = mlx4_en_tx_timeout,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002924 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2925 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2926#ifdef CONFIG_NET_POLL_CONTROLLER
2927 .ndo_poll_controller = mlx4_en_netpoll,
2928#endif
Amir Vadai60d6fe92011-11-26 19:55:19 +00002929 .ndo_set_features = mlx4_en_set_features,
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002930 .ndo_fix_features = mlx4_en_fix_features,
John Fastabende4c67342016-02-16 21:16:15 -08002931 .ndo_setup_tc = __mlx4_en_setup_tc,
Amir Vadai1eb8c692012-07-18 22:33:52 +00002932#ifdef CONFIG_RFS_ACCEL
2933 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2934#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002935 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Alexander Duycka8312742016-06-16 12:22:30 -07002936 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2937 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002938 .ndo_features_check = mlx4_en_features_check,
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002939 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Brenden Blanco47a38e12016-07-19 12:16:50 -07002940 .ndo_xdp = mlx4_xdp,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002941};
2942
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002943static const struct net_device_ops mlx4_netdev_ops_master = {
2944 .ndo_open = mlx4_en_open,
2945 .ndo_stop = mlx4_en_close,
2946 .ndo_start_xmit = mlx4_en_xmit,
2947 .ndo_select_queue = mlx4_en_select_queue,
Eric Dumazet9ed17db172016-05-25 09:50:38 -07002948 .ndo_get_stats64 = mlx4_en_get_stats64,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002949 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2950 .ndo_set_mac_address = mlx4_en_set_mac,
2951 .ndo_validate_addr = eth_validate_addr,
2952 .ndo_change_mtu = mlx4_en_change_mtu,
2953 .ndo_tx_timeout = mlx4_en_tx_timeout,
2954 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2955 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2956 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
Rony Efraim3f7fb022013-04-25 05:22:28 +00002957 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
Ido Shamaycda373f2015-04-02 16:31:16 +03002958 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
Rony Efraime6b6a232013-04-25 05:22:29 +00002959 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
Rony Efraim948e3062013-06-13 13:19:11 +03002960 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
Eran Ben Elisha62a89052015-06-15 17:59:08 +03002961 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002962 .ndo_get_vf_config = mlx4_en_get_vf_config,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002963#ifdef CONFIG_NET_POLL_CONTROLLER
2964 .ndo_poll_controller = mlx4_en_netpoll,
2965#endif
2966 .ndo_set_features = mlx4_en_set_features,
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002967 .ndo_fix_features = mlx4_en_fix_features,
John Fastabende4c67342016-02-16 21:16:15 -08002968 .ndo_setup_tc = __mlx4_en_setup_tc,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002969#ifdef CONFIG_RFS_ACCEL
2970 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2971#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002972 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Alexander Duycka8312742016-06-16 12:22:30 -07002973 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2974 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002975 .ndo_features_check = mlx4_en_features_check,
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002976 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Brenden Blanco47a38e12016-07-19 12:16:50 -07002977 .ndo_xdp = mlx4_xdp,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002978};
2979
Moni Shoua5da03542015-02-03 16:48:34 +02002980struct mlx4_en_bond {
2981 struct work_struct work;
2982 struct mlx4_en_priv *priv;
2983 int is_bonded;
2984 struct mlx4_port_map port_map;
2985};
2986
2987static void mlx4_en_bond_work(struct work_struct *work)
2988{
2989 struct mlx4_en_bond *bond = container_of(work,
2990 struct mlx4_en_bond,
2991 work);
2992 int err = 0;
2993 struct mlx4_dev *dev = bond->priv->mdev->dev;
2994
2995 if (bond->is_bonded) {
2996 if (!mlx4_is_bonded(dev)) {
2997 err = mlx4_bond(dev);
2998 if (err)
2999				en_err(bond->priv, "Failed to bond device\n");
3000 }
3001 if (!err) {
3002 err = mlx4_port_map_set(dev, &bond->port_map);
3003 if (err)
3004				en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
3005 bond->port_map.port1,
3006 bond->port_map.port2,
3007 err);
3008 }
3009 } else if (mlx4_is_bonded(dev)) {
3010 err = mlx4_unbond(dev);
3011 if (err)
3012			en_err(bond->priv, "Failed to unbond device\n");
3013 }
3014 dev_put(bond->priv->dev);
3015 kfree(bond);
3016}
3017
3018static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
3019 u8 v2p_p1, u8 v2p_p2)
3020{
3021 struct mlx4_en_bond *bond = NULL;
3022
3023 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
3024 if (!bond)
3025 return -ENOMEM;
3026
3027 INIT_WORK(&bond->work, mlx4_en_bond_work);
3028 bond->priv = priv;
3029 bond->is_bonded = is_bonded;
3030 bond->port_map.port1 = v2p_p1;
3031 bond->port_map.port2 = v2p_p2;
3032 dev_hold(priv->dev);
3033 queue_work(priv->mdev->workqueue, &bond->work);
3034 return 0;
3035}
3036
3037int mlx4_en_netdev_event(struct notifier_block *this,
3038 unsigned long event, void *ptr)
3039{
3040 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3041 u8 port = 0;
3042 struct mlx4_en_dev *mdev;
3043 struct mlx4_dev *dev;
3044 int i, num_eth_ports = 0;
3045 bool do_bond = true;
3046 struct mlx4_en_priv *priv;
3047 u8 v2p_port1 = 0;
3048 u8 v2p_port2 = 0;
3049
3050 if (!net_eq(dev_net(ndev), &init_net))
3051 return NOTIFY_DONE;
3052
3053 mdev = container_of(this, struct mlx4_en_dev, nb);
3054 dev = mdev->dev;
3055
3056	/* Go into this mode only when the two network devices on two ports
3057	 * of the same mlx4 device are slaves of the same bonding master
3058	 */
3059 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
3060 ++num_eth_ports;
3061 if (!port && (mdev->pndev[i] == ndev))
3062 port = i;
3063 mdev->upper[i] = mdev->pndev[i] ?
3064 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
3065 /* condition not met: network device is a slave */
3066 if (!mdev->upper[i])
3067 do_bond = false;
3068 if (num_eth_ports < 2)
3069 continue;
3070 /* condition not met: same master */
3071 if (mdev->upper[i] != mdev->upper[i-1])
3072 do_bond = false;
3073 }
3074	/* condition not met: exactly 2 slaves */
3075 do_bond = (num_eth_ports == 2) ? do_bond : false;
3076
3077 /* handle only events that come with enough info */
3078 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
3079 return NOTIFY_DONE;
3080
3081 priv = netdev_priv(ndev);
3082 if (do_bond) {
3083 struct netdev_notifier_bonding_info *notifier_info = ptr;
3084 struct netdev_bonding_info *bonding_info =
3085 &notifier_info->bonding_info;
3086
3087		/* require bond mode 1 (active-backup), 2 (balance-xor) or 4 (802.3ad) */
3088 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
3089 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
3090 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
3091 do_bond = false;
3092
3093 /* require exactly 2 slaves */
3094 if (bonding_info->master.num_slaves != 2)
3095 do_bond = false;
3096
3097 /* calc v2p */
3098 if (do_bond) {
3099 if (bonding_info->master.bond_mode ==
3100 BOND_MODE_ACTIVEBACKUP) {
3101 /* in active-backup mode virtual ports are
3102 * mapped to the physical port of the active
3103 * slave */
3104 if (bonding_info->slave.state ==
3105 BOND_STATE_BACKUP) {
3106 if (port == 1) {
3107 v2p_port1 = 2;
3108 v2p_port2 = 2;
3109 } else {
3110 v2p_port1 = 1;
3111 v2p_port2 = 1;
3112 }
3113 } else { /* BOND_STATE_ACTIVE */
3114 if (port == 1) {
3115 v2p_port1 = 1;
3116 v2p_port2 = 1;
3117 } else {
3118 v2p_port1 = 2;
3119 v2p_port2 = 2;
3120 }
3121 }
3122 } else { /* Active-Active */
3123 /* in active-active mode a virtual port is
3124 * mapped to the native physical port if and only
3125 * if the physical port is up */
3126 __s8 link = bonding_info->slave.link;
3127
3128 if (port == 1)
3129 v2p_port2 = 2;
3130 else
3131 v2p_port1 = 1;
3132 if ((link == BOND_LINK_UP) ||
3133 (link == BOND_LINK_FAIL)) {
3134 if (port == 1)
3135 v2p_port1 = 1;
3136 else
3137 v2p_port2 = 2;
3138 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
3139 if (port == 1)
3140 v2p_port1 = 2;
3141 else
3142 v2p_port2 = 1;
3143 }
3144 }
3145 }
3146 }
3147
3148 mlx4_en_queue_bond_work(priv, do_bond,
3149 v2p_port1, v2p_port2);
3150
3151 return NOTIFY_DONE;
3152}
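/*
 * A minimal, self-contained sketch (not part of the driver) that
 * reproduces the virtual-to-physical mapping computed above. `port` is
 * the physical port (1 or 2) the bonding event arrived on. In
 * active-backup mode both virtual ports follow the active slave; in
 * active-active mode the other port keeps its identity mapping, and the
 * event's port maps to itself only while its link is up (or in the FAIL
 * transition state).
 */
#include <stdbool.h>
#include <stdio.h>

struct port_map { unsigned char v2p_port1, v2p_port2; };

static struct port_map calc_v2p(bool active_backup, int port,
				bool slave_is_backup, bool link_usable)
{
	struct port_map m;

	if (active_backup) {
		/* both virtual ports map to the active slave's port */
		int active = slave_is_backup ? (port == 1 ? 2 : 1) : port;

		m.v2p_port1 = m.v2p_port2 = active;
	} else {
		m.v2p_port1 = 1;	/* identity mapping by default */
		m.v2p_port2 = 2;
		if (!link_usable) {	/* reroute the dead port's traffic */
			if (port == 1)
				m.v2p_port1 = 2;
			else
				m.v2p_port2 = 1;
		}
	}
	return m;
}

int main(void)
{
	/* port 1 lost link in active-active: both map to port 2 */
	struct port_map m = calc_v2p(false, 1, false, false);

	printf("v2p = [%u][%u]\n", m.v2p_port1, m.v2p_port2);
	return 0;
}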
3153
Matan Barak0b131562015-03-30 17:45:25 +03003154void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
3155 struct mlx4_en_stats_bitmap *stats_bitmap,
3156 u8 rx_ppp, u8 rx_pause,
3157 u8 tx_ppp, u8 tx_pause)
3158{
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03003159 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
Matan Barak0b131562015-03-30 17:45:25 +03003160
3161 if (!mlx4_is_slave(dev) &&
3162 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
3163 mutex_lock(&stats_bitmap->mutex);
3164 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
3165
3166 if (rx_ppp)
3167 bitmap_set(stats_bitmap->bitmap, last_i,
3168 NUM_FLOW_PRIORITY_STATS_RX);
3169 last_i += NUM_FLOW_PRIORITY_STATS_RX;
3170
3171 if (rx_pause && !(rx_ppp))
3172 bitmap_set(stats_bitmap->bitmap, last_i,
3173 NUM_FLOW_STATS_RX);
3174 last_i += NUM_FLOW_STATS_RX;
3175
3176 if (tx_ppp)
3177 bitmap_set(stats_bitmap->bitmap, last_i,
3178 NUM_FLOW_PRIORITY_STATS_TX);
3179 last_i += NUM_FLOW_PRIORITY_STATS_TX;
3180
3181 if (tx_pause && !(tx_ppp))
3182 bitmap_set(stats_bitmap->bitmap, last_i,
3183 NUM_FLOW_STATS_TX);
3184 last_i += NUM_FLOW_STATS_TX;
3185
3186 mutex_unlock(&stats_bitmap->mutex);
3187 }
3188}
3189
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003190void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
Matan Barak0b131562015-03-30 17:45:25 +03003191 struct mlx4_en_stats_bitmap *stats_bitmap,
3192 u8 rx_ppp, u8 rx_pause,
3193 u8 tx_ppp, u8 tx_pause)
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003194{
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003195 int last_i = 0;
3196
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003197 mutex_init(&stats_bitmap->mutex);
3198 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003199
3200 if (mlx4_is_slave(dev)) {
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003201 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003202 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003203 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003204 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003205 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003206 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003207 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003208 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003209 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003210 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003211 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003212 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
3213 } else {
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003214 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003215 }
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003216 last_i += NUM_MAIN_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003217
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003218 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003219 last_i += NUM_PORT_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003220
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03003221 if (mlx4_is_master(dev))
3222 bitmap_set(stats_bitmap->bitmap, last_i,
3223 NUM_PF_STATS);
3224 last_i += NUM_PF_STATS;
3225
Matan Barak0b131562015-03-30 17:45:25 +03003226 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
3227 rx_ppp, rx_pause,
3228 tx_ppp, tx_pause);
3229 last_i += NUM_FLOW_STATS;
3230
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003231 if (!mlx4_is_slave(dev))
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003232 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
Tariq Toukan15fca2c2016-11-02 17:12:25 +02003233 last_i += NUM_PKT_STATS;
3234
3235 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
3236 last_i += NUM_XDP_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003237}
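/*
 * A minimal, self-contained sketch (not part of the driver) of how the
 * bitmap above is laid out: fixed-size regions in a fixed order, where
 * the running offset advances by the full region size even when a region
 * is left disabled, so later regions keep stable bit positions. The
 * NUM_* sizes are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

enum { NUM_MAIN = 8, NUM_PORT = 10, NUM_PF = 5 };

static void set_region(uint64_t *bm, int start, int len)
{
	int i;

	for (i = 0; i < len; i++)
		*bm |= 1ULL << (start + i);
}

int main(void)
{
	uint64_t bm = 0;
	int last_i = 0, is_master = 0;

	set_region(&bm, last_i, NUM_MAIN);
	last_i += NUM_MAIN;
	set_region(&bm, last_i, NUM_PORT);
	last_i += NUM_PORT;
	if (is_master)			/* PF stats shown only on the PF */
		set_region(&bm, last_i, NUM_PF);
	last_i += NUM_PF;		/* advance even when skipped */
	printf("bitmap=%#llx, next region at bit %d\n",
	       (unsigned long long)bm, last_i);
	return 0;
}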
3238
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003239int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3240 struct mlx4_en_port_profile *prof)
3241{
3242 struct net_device *dev;
3243 struct mlx4_en_priv *priv;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003244 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003245 int err;
3246
Tom Herbertf1593d22011-01-09 19:36:36 +00003247 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
Amir Vadaid3179662012-12-02 03:49:23 +00003248 MAX_TX_RINGS, MAX_RX_RINGS);
Joe Perches41de8d42012-01-29 13:47:52 +00003249 if (dev == NULL)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003250 return -ENOMEM;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003251
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003252 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
Amir Vadaid3179662012-12-02 03:49:23 +00003253 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
3254
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003255 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
Amir Vadai76a066f2014-02-25 18:17:51 +02003256 dev->dev_port = port - 1;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003257
3258 /*
3259 * Initialize driver private data
3260 */
3261
3262 priv = netdev_priv(dev);
3263 memset(priv, 0, sizeof(struct mlx4_en_priv));
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03003264 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003265 spin_lock_init(&priv->stats_lock);
3266 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3267 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
3268 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3269 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3270 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003271 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
3272 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003273#ifdef CONFIG_RFS_ACCEL
3274 INIT_LIST_HEAD(&priv->filters);
3275 spin_lock_init(&priv->filters_lock);
3276#endif
3277
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003278 priv->dev = dev;
3279 priv->mdev = mdev;
Yevgeny Petrilinebf8c9a2012-03-06 04:03:34 +00003280 priv->ddev = &mdev->pdev->dev;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003281 priv->prof = prof;
3282 priv->port = port;
3283 priv->port_up = false;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003284 priv->flags = prof->flags;
Amir Vadai0fef9d02014-07-22 15:44:10 +03003285 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
Amir Vadai60d6fe92011-11-26 19:55:19 +00003286 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
3287 MLX4_WQE_CTRL_SOLICITED);
Amir Vadaid3179662012-12-02 03:49:23 +00003288 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
Amir Vadaifbc6daf2014-07-08 11:28:12 +03003289 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
Eric Dumazetbd635c32014-11-22 17:24:19 -08003290 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
Amir Vadaid3179662012-12-02 03:49:23 +00003291
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003292 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
3293 priv->tx_ring_num[t] = prof->tx_ring_num[t];
3294 if (!priv->tx_ring_num[t])
3295 continue;
3296
3297 priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
3298 MAX_TX_RINGS, GFP_KERNEL);
3299 if (!priv->tx_ring[t]) {
3300 err = -ENOMEM;
3301 goto err_free_tx;
3302 }
3303 priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
3304 MAX_TX_RINGS, GFP_KERNEL);
3305 if (!priv->tx_cq[t]) {
3306 kfree(priv->tx_ring[t]);
3307 err = -ENOMEM;
3308 goto out;
3309 }
Amir Vadaibc6a4742012-05-17 00:58:10 +00003310 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003311 priv->rx_ring_num = prof->rx_ring_num;
Or Gerlitz08ff3232012-10-21 14:59:24 +00003312 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
Ido Shamayb1b6b4d2014-09-18 11:51:01 +03003313 priv->cqe_size = mdev->dev->caps.cqe_size;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003314 priv->mac_index = -1;
3315 priv->msg_enable = MLX4_EN_MSG_LEVEL;
Amir Vadai564c2742012-04-04 21:33:26 +00003316#ifdef CONFIG_MLX4_EN_DCB
Or Gerlitz540b3a32013-04-07 03:44:07 +00003317 if (!mlx4_is_slave(priv->mdev->dev)) {
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003318 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3319 DCB_CAP_DCBX_VER_IEEE;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003320 priv->flags |= MLX4_EN_DCB_ENABLED;
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003321 priv->cee_config.pfc_state = false;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003322
Inbar Karmyf21ad612017-06-29 14:07:56 +03003323 for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003324 priv->cee_config.dcb_pfc[i] = pfc_disabled;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003325
Ido Shamay3742cc62015-04-02 16:31:17 +03003326 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
Or Gerlitz540b3a32013-04-07 03:44:07 +00003327 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
3328 } else {
3329 en_info(priv, "enabling only PFC DCB ops\n");
3330 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
3331 }
3332 }
Amir Vadai564c2742012-04-04 21:33:26 +00003333#endif
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003334
Yan Burmanc07cb4b2013-02-07 02:25:25 +00003335 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
3336 INIT_HLIST_HEAD(&priv->mac_hash[i]);
Yan Burman16a10ff2013-02-07 02:25:22 +00003337
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003338 /* Query for default mac and max mtu */
3339 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
Yan Burman6bbb6d92013-02-07 02:25:20 +00003340
Shani Michaelif8c64552014-11-09 13:51:53 +02003341 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
3342 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
3343 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
3344
Yan Burman6bbb6d92013-02-07 02:25:20 +00003345 /* Set default MAC */
3346 dev->addr_len = ETH_ALEN;
3347 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3348 if (!is_valid_ether_addr(dev->dev_addr)) {
Jack Morgenstein2b3ddf22015-10-14 17:43:48 +03003349		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
3350 priv->port, dev->dev_addr);
3351 err = -EINVAL;
3352 goto out;
3353 } else if (mlx4_is_slave(priv->mdev->dev) &&
3354 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
3355 /* Random MAC was assigned in mlx4_slave_cap
3356 * in mlx4_core module
3357 */
3358 dev->addr_assign_type |= NET_ADDR_RANDOM;
3359 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003360 }
3361
Noa Osherovich2695bab2014-07-08 11:25:24 +03003362 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
Yan Burman6bbb6d92013-02-07 02:25:20 +00003363
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003364 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
3365 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
3366 err = mlx4_en_alloc_resources(priv);
3367 if (err)
3368 goto out;
3369
Amir Vadaiec693d42013-04-23 06:06:49 +00003370 /* Initialize time stamping config */
3371 priv->hwtstamp_config.flags = 0;
3372 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3373 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3374
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003375 /* Allocate page for receive rings */
3376 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
Haggai Abramovsky73898db2016-05-04 14:50:15 +03003377 MLX4_EN_PAGE_SIZE);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003378 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00003379 en_err(priv, "Failed to allocate page for rx qps\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003380 goto out;
3381 }
3382 priv->allocated = 1;
3383
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003384 /*
3385 * Initialize netdev entry points
3386 */
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00003387 if (mlx4_is_master(priv->mdev->dev))
3388 dev->netdev_ops = &mlx4_netdev_ops_master;
3389 else
3390 dev->netdev_ops = &mlx4_netdev_ops;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003391 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003392 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
Ben Hutchings1eb63a22010-09-27 08:29:34 +00003393 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
Stephen Hemminger3addc562008-11-21 17:30:58 -08003394
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00003395 dev->ethtool_ops = &mlx4_en_ethtool_ops;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003396
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003397 /*
3398 * Set driver features
3399 */
Michał Mirosławc8c64cf2011-04-15 04:50:49 +00003400 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3401 if (mdev->LSO_support)
3402 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3403
3404 dev->vlan_features = dev->hw_features;
3405
Yevgeny Petrilinad861072011-10-18 01:51:24 +00003406 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
Michał Mirosławc8c64cf2011-04-15 04:50:49 +00003407 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
Patrick McHardyf6469682013-04-19 02:04:27 +00003408 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3409 NETIF_F_HW_VLAN_CTAG_FILTER;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003410 dev->hw_features |= NETIF_F_LOOPBACK |
3411 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003412
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003413 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3414 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
3415 NETIF_F_HW_VLAN_STAG_FILTER;
3416 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
3417 }
3418
3419 if (mlx4_is_slave(mdev->dev)) {
Moshe Shemesh0815fe32016-09-22 12:11:14 +03003420 bool vlan_offload_disabled;
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003421 int phv;
3422
3423 err = get_phv_bit(mdev->dev, port, &phv);
3424 if (!err && phv) {
3425 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3426 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
3427 }
Moshe Shemesh0815fe32016-09-22 12:11:14 +03003428 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
3429 &vlan_offload_disabled);
3430 if (!err && vlan_offload_disabled) {
3431 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3432 NETIF_F_HW_VLAN_CTAG_RX |
3433 NETIF_F_HW_VLAN_STAG_TX |
3434 NETIF_F_HW_VLAN_STAG_RX);
3435 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3436 NETIF_F_HW_VLAN_CTAG_RX |
3437 NETIF_F_HW_VLAN_STAG_TX |
3438 NETIF_F_HW_VLAN_STAG_RX);
3439 }
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003440 } else {
3441 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3442 !(mdev->dev->caps.flags2 &
3443 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
3444 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3445 }
3446
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003447 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3448 dev->hw_features |= NETIF_F_RXFCS;
3449
Muhammad Mahajna78500b82015-04-02 16:31:22 +03003450 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3451 dev->hw_features |= NETIF_F_RXALL;
3452
Amir Vadai1eb8c692012-07-18 22:33:52 +00003453 if (mdev->dev->caps.steering_mode ==
Matan Barak7d077cd2014-12-11 10:58:00 +02003454 MLX4_STEERING_MODE_DEVICE_MANAGED &&
3455 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
Amir Vadai1eb8c692012-07-18 22:33:52 +00003456 dev->hw_features |= NETIF_F_NTUPLE;
3457
Yan Burmancc5387f2013-02-07 02:25:26 +00003458 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3459 dev->priv_flags |= IFF_UNICAST_FLT;
3460
Eyal Perry947cbb02014-12-02 18:12:11 +02003461 /* Setting a default hash function value */
3462 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3463 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3464 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3465 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3466 } else {
3467 en_warn(priv,
3468 "No RSS hash capabilities exposed, using Toeplitz\n");
3469 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3470 }
3471
Eugenia Emantayev925ab1a2016-02-17 17:24:27 +02003472 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Alexander Duyck3c9346b2016-05-02 09:38:30 -07003473 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3474 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3475 NETIF_F_GSO_PARTIAL;
3476 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3477 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3478 NETIF_F_GSO_PARTIAL;
3479 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
Eugenia Emantayev925ab1a2016-02-17 17:24:27 +02003480 }
3481
Jarod Wilsonb80f71f2016-10-17 15:54:07 -04003482 /* MTU range: 46 - hw-specific max */
3483 dev->min_mtu = MLX4_EN_MIN_MTU;
3484 dev->max_mtu = priv->max_mtu;
3485
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003486 mdev->pndev[port] = dev;
Moni Shoua5da03542015-02-03 16:48:34 +02003487 mdev->upper[port] = NULL;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003488
3489 netif_carrier_off(dev);
Eugenia Emantayev4801ae72013-06-25 12:09:31 +03003490 mlx4_en_set_default_moderation(priv);
3491
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003492 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00003493 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3494
Yan Burman79aeacc2013-02-07 02:25:19 +00003495 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3496
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003497 /* Configure port */
Yevgeny Petrilin5c8e9042012-06-25 00:24:11 +00003498 mlx4_en_calc_rx_buf(dev);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003499 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
Yevgeny Petrilin5c8e9042012-06-25 00:24:11 +00003500 priv->rx_skb_size + ETH_FCS_LEN,
3501 prof->tx_pause, prof->tx_ppp,
3502 prof->rx_pause, prof->rx_ppp);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003503 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07003504 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3505 priv->port, err);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003506 goto out;
3507 }
3508
Or Gerlitz837052d2013-12-23 16:09:44 +02003509 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02003510 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
Or Gerlitz837052d2013-12-23 16:09:44 +02003511 if (err) {
3512 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3513 err);
3514 goto out;
3515 }
3516 }
3517
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003518 /* Init port */
3519 en_warn(priv, "Initializing port\n");
3520 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3521 if (err) {
3522 en_err(priv, "Failed Initializing port\n");
3523 goto out;
3524 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003525 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
Amir Vadaidc8142e2013-04-25 05:22:24 +00003526
Eugenia Emantayev90683062015-12-17 15:35:38 +02003527 /* Initialize time stamp mechanism */
Amir Vadaidc8142e2013-04-25 05:22:24 +00003528 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
Eugenia Emantayev90683062015-12-17 15:35:38 +02003529 mlx4_en_init_timestamp(mdev);
3530
Eugenia Emantayevfc9f5ea2015-12-17 15:35:37 +02003531 queue_delayed_work(mdev->workqueue, &priv->service_task,
3532 SERVICE_TASK_DELAY);
Amir Vadaidc8142e2013-04-25 05:22:24 +00003533
Matan Barak0b131562015-03-30 17:45:25 +03003534 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3535 mdev->profile.prof[priv->port].rx_ppp,
3536 mdev->profile.prof[priv->port].rx_pause,
3537 mdev->profile.prof[priv->port].tx_ppp,
3538 mdev->profile.prof[priv->port].tx_pause);
Eran Ben Elisha39de9612015-03-18 16:51:38 +02003539
Ido Shamaye5eda892015-03-24 15:18:38 +02003540 err = register_netdev(dev);
3541 if (err) {
3542 en_err(priv, "Netdev registration failed for port %d\n", port);
3543 goto out;
3544 }
3545
3546 priv->registered = 1;
Jiri Pirko09d4d082016-02-26 17:32:24 +01003547 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
3548 dev);
Ido Shamaye5eda892015-03-24 15:18:38 +02003549
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003550 return 0;
3551
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003552err_free_tx:
3553 while (t--) {
3554 kfree(priv->tx_ring[t]);
3555 kfree(priv->tx_cq[t]);
3556 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003557out:
3558 mlx4_en_destroy_netdev(dev);
3559 return err;
3560}
3561
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003562int mlx4_en_reset_config(struct net_device *dev,
3563 struct hwtstamp_config ts_config,
3564 netdev_features_t features)
3565{
3566 struct mlx4_en_priv *priv = netdev_priv(dev);
3567 struct mlx4_en_dev *mdev = priv->mdev;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003568 struct mlx4_en_port_profile new_prof;
3569 struct mlx4_en_priv *tmp;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003570 int port_up = 0;
3571 int err = 0;
3572
3573 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3574 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003575 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3576 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003577 return 0; /* Nothing to change */
3578
3579 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3580 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3581 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3582 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3583 return -EINVAL;
3584 }
3585
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003586 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3587 if (!tmp)
3588 return -ENOMEM;
3589
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003590 mutex_lock(&mdev->state_lock);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003591
3592 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
3593 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
3594
Martin KaFai Lau770f8222017-01-31 22:35:33 -08003595 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003596 if (err)
3597 goto out;
3598
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003599 if (priv->port_up) {
3600 port_up = 1;
3601 mlx4_en_stop_port(dev, 1);
3602 }
3603
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003604 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003605 ts_config.rx_filter,
3606 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003607
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003608 mlx4_en_safe_replace_resources(priv, tmp);
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003609
3610 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3611 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3612 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3613 else
3614 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3615 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3616 /* RX time-stamping is OFF, update the RX vlan offload
3617 * to the latest wanted state
3618 */
3619 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3620 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3621 else
3622 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3623 }
3624
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003625 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3626 if (features & NETIF_F_RXFCS)
3627 dev->features |= NETIF_F_RXFCS;
3628 else
3629 dev->features &= ~NETIF_F_RXFCS;
3630 }
3631
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003632	/* RX vlan offload and RX time-stamping can't coexist!
3633	 * Regardless of the caller's choice,
3634	 * turn off RX vlan offload when time-stamping is ON.
3635	 */
3636 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3637 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3638 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3639 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3640 }
3641
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003642 if (port_up) {
3643 err = mlx4_en_start_port(dev);
3644 if (err)
3645 en_err(priv, "Failed starting port\n");
3646 }
3647
3648out:
3649 mutex_unlock(&mdev->state_lock);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003650 kfree(tmp);
3651 if (!err)
3652 netdev_features_change(dev);
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003653 return err;
3654}