/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

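/* Largest MTU that still lets a frame fit in a single page when an XDP
 * program is attached: the page must also hold XDP_PACKET_HEADROOM, the
 * Ethernet header and up to two stacked VLAN tags.
 */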
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
				   XDP_PACKET_HEADROOM))

int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	netdev_set_num_tc(dev, up);
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
	/* Partition Tx queues evenly amongst UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

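/* Switch the port between the low and high UP-count profiles
 * (MLX4_EN_NUM_UP_LOW/MLX4_EN_NUM_UP_HIGH): build the new profile,
 * pre-allocate its resources into a scratch priv, stop the port, swap the
 * resources in, restart, and only then program the TC-to-queue mapping
 * via mlx4_en_setup_tc().
 */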
int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
				      MLX4_EN_NUM_UP_HIGH;
	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
				   new_prof.num_up;
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port for setup TC\n");
			goto out;
		}
	}

	err = mlx4_en_setup_tc(dev, tc);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}

static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      struct tc_to_netdev *tc)
{
	if (type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc);
}

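/* Accelerated RFS: each steered flow is tracked by a struct mlx4_en_filter.
 * mlx4_en_filter_rfs() below allocates (or retargets) a filter and defers
 * the actual flow-steering attach to a work item; filters that RPS no
 * longer needs are reaped in mlx4_en_filter_rfs_expire().
 */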
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

/* Must not acquire state_lock, as its corresponding work_sync
 * is done under it.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

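/* Hash a 4-tuple into priv->filter_hash: the key folds both ports and the
 * XOR of the two IPv4 addresses into one word, then hash_long() picks one
 * of the 1 << MLX4_EN_FILTER_HASH_SHIFT buckets.
 */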
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

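/* Used as the .ndo_rx_flow_steer callback: steer an IPv4 TCP/UDP flow to
 * the RX queue on which its consumer runs.  Returns the driver-local
 * filter id that the RPS core later passes back to rps_may_expire_flow().
 */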
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

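/* Expand a MAC held in the low 48 bits of a u64 into a byte array, most
 * significant byte first: e.g. 0x0002c9030001ULL becomes 00:02:c9:03:00:01.
 * The two bytes past ETH_ALEN are zeroed.
 */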
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

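/* Attach a unicast steering rule for @mac to QP @qpn, in whichever steering
 * mode the device runs: B0 steering encodes port and MAC into a GID for
 * mlx4_unicast_attach(), while device-managed flow steering builds an L2
 * spec and registers it through mlx4_flow_attach().
 */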
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

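/* Retarget the unicast steering of @qpn from @prev_mac to @new_mac.
 * Outside A0 steering this means: drop the old steering rule and MAC
 * registration, rewrite the mac_hash entry in place (RCU-safe), register
 * the new MAC, then re-attach unicast steering and, if one was present,
 * the VXLAN tunnel rule.  A0 steering delegates to __mlx4_replace_mac().
 */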
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

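/* Diff the cached multicast list (@src) against the currently programmed
 * one (@dst): entries only in @dst are marked MCLIST_REM, entries only in
 * @src are copied into @dst as MCLIST_ADD, and entries present in both stay
 * MCLIST_NONE.  The caller then walks @dst and (de)attaches accordingly.
 */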
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

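/* Program the port's multicast state.  With IFF_ALLMULTI the HW multicast
 * filter is disabled and the default QP is added as a multicast-promisc
 * entry; otherwise the filter is rebuilt from the cached list and each
 * address is attached to (or detached from) the RSS indirection QP
 * according to the MCLIST_ADD/MCLIST_REM diff from update_mclist_flags().
 */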
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

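/* Synchronize the HW unicast filter with the netdev uc address list: drop
 * mac_hash entries no longer present in the list, then register and steer
 * any new addresses.  If registration fails (e.g. out of memory or no free
 * MAC table entry) the port falls back to forced promiscuous mode via
 * MLX4_EN_FLAG_FORCE_PROMISC.
 */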
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		cq = priv->tx_cq[TX][i];
		napi_schedule(&cq->napi);
	}
}
#endif

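/* Install the steering rules that direct traffic for the port's own MAC to
 * the RSS QP range: a unicast L2 rule plus, when VXLAN offload is active,
 * a tunnel rule.  The MAC is also seeded into the mac_hash table used by
 * the unicast filter.  On error, cleanup unwinds in reverse order.
 */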
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];

		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, tx_ring->qpn, tx_ring->sp_cqn,
			tx_ring->cons, tx_ring->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i, t;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			cq = priv->tx_cq[t][i];
			cq->moder_cnt = priv->tx_frames;
			cq->moder_time = priv->tx_usecs;
		}
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

1433static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1434{
1435 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
Eric Dumazetf5a57722017-02-16 15:23:27 -08001436 u32 pkt_rate_high, pkt_rate_low;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001437 struct mlx4_en_cq *cq;
1438 unsigned long packets;
1439 unsigned long rate;
1440 unsigned long avg_pkt_size;
1441 unsigned long rx_packets;
1442 unsigned long rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001443 unsigned long rx_pkt_diff;
1444 int moder_time;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001445 int ring, err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001446
1447 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1448 return;
1449
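 /* pkt_rate_low/high can be updated concurrently (e.g. via ethtool
 * coalescing settings), so snapshot them once for this pass. */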
Eric Dumazetf5a57722017-02-16 15:23:27 -08001450 pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
1451 pkt_rate_high = READ_ONCE(priv->pkt_rate_high);
1452
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001453 for (ring = 0; ring < priv->rx_ring_num; ring++) {
Eric Dumazetb9972d22016-11-23 09:46:52 -08001454 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
1455 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001456
Eric Dumazetf5a57722017-02-16 15:23:27 -08001457 rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001458 packets = rx_pkt_diff;
1459 rate = packets * HZ / period;
Eric Dumazetf5a57722017-02-16 15:23:27 -08001460 avg_pkt_size = packets ? (rx_bytes -
1461 priv->last_moder_bytes[ring]) / packets : 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001462
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001463 /* Apply auto-moderation only when packet rate
 1464 * exceeds a rate at which it matters */
1465 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1466 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
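 /* Linearly interpolate moder_time between rx_usecs_low and
 * rx_usecs_high according to where the current rate falls in
 * [pkt_rate_low, pkt_rate_high]. */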
Eric Dumazetf5a57722017-02-16 15:23:27 -08001467 if (rate <= pkt_rate_low)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001468 moder_time = priv->rx_usecs_low;
Eric Dumazetf5a57722017-02-16 15:23:27 -08001469 else if (rate >= pkt_rate_high)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001470 moder_time = priv->rx_usecs_high;
1471 else
Eric Dumazetf5a57722017-02-16 15:23:27 -08001472 moder_time = (rate - pkt_rate_low) *
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001473 (priv->rx_usecs_high - priv->rx_usecs_low) /
Eric Dumazetf5a57722017-02-16 15:23:27 -08001474 (pkt_rate_high - pkt_rate_low) +
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001475 priv->rx_usecs_low;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001476 } else {
1477 moder_time = priv->rx_usecs_low;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001478 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001479
Eric Dumazetf5a57722017-02-16 15:23:27 -08001480 cq = priv->rx_cq[ring];
1481 if (moder_time != priv->last_moder_time[ring] ||
1482 cq->moder_cnt != priv->rx_frames) {
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001483 priv->last_moder_time[ring] = moder_time;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001484 cq->moder_time = moder_time;
Sagi Grimberga1c66932013-06-04 05:13:26 +00001485 cq->moder_cnt = priv->rx_frames;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001486 err = mlx4_en_set_cq_moder(priv, cq);
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001487 if (err)
Yan Burman48e551f2013-02-07 02:25:21 +00001488 en_err(priv, "Failed modifying moderation for cq:%d\n",
1489 ring);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001490 }
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001491 priv->last_moder_packets[ring] = rx_packets;
1492 priv->last_moder_bytes[ring] = rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001493 }
1494
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001495 priv->last_moder_jiffies = jiffies;
1496}
1497
1498static void mlx4_en_do_get_stats(struct work_struct *work)
1499{
Jean Delvarebf6aede2009-04-02 16:56:54 -07001500 struct delayed_work *delay = to_delayed_work(work);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001501 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1502 stats_task);
1503 struct mlx4_en_dev *mdev = priv->mdev;
1504 int err;
1505
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001506 mutex_lock(&mdev->state_lock);
1507 if (mdev->device_up) {
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001508 if (priv->port_up) {
1509 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1510 if (err)
1511 en_dbg(HW, priv, "Could not update stats\n");
Eugenia Emantayev2d518372013-01-24 01:54:14 +00001512
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001513 mlx4_en_auto_moderation(priv);
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001514 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001515
1516 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1517 }
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001518 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
Noa Osherovich2695bab2014-07-08 11:25:24 +03001519 mlx4_en_do_set_mac(priv, priv->current_mac);
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001520 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1521 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001522 mutex_unlock(&mdev->state_lock);
1523}
1524
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001525/* mlx4_en_service_task - Run service task for tasks that need to be done
1526 * periodically
1527 */
1528static void mlx4_en_service_task(struct work_struct *work)
1529{
1530 struct delayed_work *delay = to_delayed_work(work);
1531 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1532 service_task);
1533 struct mlx4_en_dev *mdev = priv->mdev;
1534
1535 mutex_lock(&mdev->state_lock);
1536 if (mdev->device_up) {
Amir Vadaidc8142e2013-04-25 05:22:24 +00001537 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1538 mlx4_en_ptp_overflow_check(mdev);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001539
Ido Shamay07841f92015-04-30 17:32:46 +03001540 mlx4_en_recover_from_oom(priv);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001541 queue_delayed_work(mdev->workqueue, &priv->service_task,
1542 SERVICE_TASK_DELAY);
1543 }
1544 mutex_unlock(&mdev->state_lock);
1545}
1546
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001547static void mlx4_en_linkstate(struct work_struct *work)
1548{
1549 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1550 linkstate_task);
1551 struct mlx4_en_dev *mdev = priv->mdev;
1552 int linkstate = priv->link_state;
1553
1554 mutex_lock(&mdev->state_lock);
 1555 /* If the observable port state changed, set the carrier state
 1556 * and report to the system log */
1557 if (priv->last_link_state != linkstate) {
1558 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001559 en_info(priv, "Link Down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001560 netif_carrier_off(priv->dev);
1561 } else {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001562 en_info(priv, "Link Up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001563 netif_carrier_on(priv->dev);
1564 }
1565 }
1566 priv->last_link_state = linkstate;
1567 mutex_unlock(&mdev->state_lock);
1568}
1569
Yuval Atias9e311e72014-06-09 10:24:39 +03001570static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1571{
1572 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1573 int numa_node = priv->mdev->dev->numa_node;
Yuval Atias9e311e72014-06-09 10:24:39 +03001574
1575 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1576 return -ENOMEM;
1577
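 /* cpumask_local_spread() returns the ring_idx-th CPU, preferring
 * CPUs on the device's NUMA node, so IRQ affinity hints fill
 * local cores before remote ones. */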
Rusty Russellf36963c2015-05-09 03:14:13 +09301578 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
1579 ring->affinity_mask);
1580 return 0;
Yuval Atias9e311e72014-06-09 10:24:39 +03001581}
1582
1583static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1584{
1585 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1586}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001587
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001588static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
1589 int tx_ring_idx)
1590{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001591 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
1592 int rr_index = tx_ring_idx;
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001593
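 /* XDP TX ring i is paired 1:1 with RX ring i, so completed XDP_TX
 * frames hand their pages straight back to the RX ring that owns
 * them. */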
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001594 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
1595 tx_ring->recycle_ring = priv->rx_ring[rr_index];
1596 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
1597 TX_XDP, tx_ring_idx, rr_index);
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001598}
1599
Yevgeny Petrilin18cc42a2008-12-29 18:39:20 -08001600int mlx4_en_start_port(struct net_device *dev)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001601{
1602 struct mlx4_en_priv *priv = netdev_priv(dev);
1603 struct mlx4_en_dev *mdev = priv->mdev;
1604 struct mlx4_en_cq *cq;
1605 struct mlx4_en_tx_ring *tx_ring;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001606 int rx_index = 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001607 int err = 0;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001608 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001609 int j;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001610 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001611
1612 if (priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001613 en_dbg(DRV, priv, "start port called while port already up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001614 return 0;
1615 }
1616
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001617 INIT_LIST_HEAD(&priv->mc_list);
1618 INIT_LIST_HEAD(&priv->curr_list);
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001619 INIT_LIST_HEAD(&priv->ethtool_list);
1620 memset(&priv->ethtool_rules[0], 0,
1621 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001622
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001623 /* Calculate Rx buf size */
1624 dev->mtu = min(dev->mtu, priv->max_mtu);
1625 mlx4_en_calc_rx_buf(dev);
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001626 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001627
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001628 /* Configure rx cq's and rings */
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001629 err = mlx4_en_activate_rx_rings(priv);
1630 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001631 en_err(priv, "Failed to activate RX rings\n");
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001632 return err;
1633 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001634 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001635 cq = priv->rx_cq[i];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001636
Yuval Atias9e311e72014-06-09 10:24:39 +03001637 err = mlx4_en_init_affinity_hint(priv, i);
1638 if (err) {
1639 en_err(priv, "Failed preparing IRQ affinity hint\n");
1640 goto cq_err;
1641 }
1642
Alexander Guller76532d02011-10-09 05:26:31 +00001643 err = mlx4_en_activate_cq(priv, cq, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001644 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001645 en_err(priv, "Failed activating Rx CQ\n");
Yuval Atias9e311e72014-06-09 10:24:39 +03001646 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilina4233302009-04-26 20:41:34 +00001647 goto cq_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001648 }
Ido Shamayc3f25112014-12-16 13:28:54 +02001649
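 /* Set the ownership bit in every CQE so that stale entries are
 * not mistaken for valid completions before HW writes them. */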
1650 for (j = 0; j < cq->size; j++) {
1651 struct mlx4_cqe *cqe = NULL;
1652
1653 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1654 priv->cqe_factor;
1655 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1656 }
1657
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001658 err = mlx4_en_set_cq_moder(priv, cq);
1659 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001660 en_err(priv, "Failed setting cq moderation parameters\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001661 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001662 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001663 goto cq_err;
1664 }
1665 mlx4_en_arm_cq(priv, cq);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001666 priv->rx_ring[i]->cqn = cq->mcq.cqn;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001667 ++rx_index;
1668 }
1669
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001670 /* Set qp number */
1671 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
Yan Burman16a10ff2013-02-07 02:25:22 +00001672 err = mlx4_en_get_qp(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001673 if (err) {
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001674 en_err(priv, "Failed getting eth qp\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001675 goto cq_err;
1676 }
1677 mdev->mac_removed[priv->port] = 0;
1678
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03001679 priv->counter_index =
1680 mlx4_get_default_counter_index(mdev->dev, priv->port);
1681
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001682 err = mlx4_en_config_rss_steer(priv);
1683 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001684 en_err(priv, "Failed configuring rss steering\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001685 goto mac_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001686 }
1687
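 /* The drop QP is the destination for steering rules that should
 * discard traffic (e.g. ethtool flow specs with a drop action). */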
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001688 err = mlx4_en_create_drop_qp(priv);
1689 if (err)
1690 goto rss_err;
1691
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001692 /* Configure tx cq's and rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001693 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
Tariq Toukaneb9def62016-12-22 14:32:58 +02001694 u8 num_tx_rings_p_up = t == TX ?
1695 priv->num_tx_rings_p_up : priv->tx_ring_num[t];
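 /* Regular TX rings are grouped into user priorities of
 * num_tx_rings_p_up rings each; all XDP TX rings map to UP 0. */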
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001696
1697 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1698 /* Configure cq */
1699 cq = priv->tx_cq[t][i];
1700 err = mlx4_en_activate_cq(priv, cq, i);
1701 if (err) {
1702 en_err(priv, "Failed allocating Tx CQ\n");
1703 goto tx_err;
1704 }
1705 err = mlx4_en_set_cq_moder(priv, cq);
1706 if (err) {
1707 en_err(priv, "Failed setting cq moderation parameters\n");
1708 mlx4_en_deactivate_cq(priv, cq);
1709 goto tx_err;
1710 }
1711 en_dbg(DRV, priv,
1712 "Resetting index of collapsed CQ:%d to -1\n", i);
1713 cq->buf->wqe_index = cpu_to_be16(0xffff);
1714
1715 /* Configure ring */
1716 tx_ring = priv->tx_ring[t][i];
1717 err = mlx4_en_activate_tx_ring(priv, tx_ring,
1718 cq->mcq.cqn,
1719 i / num_tx_rings_p_up);
1720 if (err) {
1721 en_err(priv, "Failed allocating Tx ring\n");
1722 mlx4_en_deactivate_cq(priv, cq);
1723 goto tx_err;
1724 }
1725 if (t != TX_XDP) {
1726 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1727 tx_ring->recycle_ring = NULL;
Tariq Toukan6c785112017-06-15 14:35:37 +03001728
1729 /* Arm CQ for TX completions */
1730 mlx4_en_arm_cq(priv, cq);
1731
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001732 } else {
1733 mlx4_en_init_recycle_ring(priv, i);
Tariq Toukan6c785112017-06-15 14:35:37 +03001734 /* XDP TX CQ should never be armed */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001735 }
1736
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001737 /* Set initial ownership of all Tx TXBBs to SW (1) */
1738 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1739 *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001740 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001741 }
1742
1743 /* Configure port */
1744 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1745 priv->rx_skb_size + ETH_FCS_LEN,
Yevgeny Petrilind53b93f2008-11-05 04:48:36 +00001746 priv->prof->tx_pause,
1747 priv->prof->tx_ppp,
1748 priv->prof->rx_pause,
1749 priv->prof->rx_ppp);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001750 if (err) {
Yan Burman48e551f2013-02-07 02:25:21 +00001751 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1752 priv->port, err);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001753 goto tx_err;
1754 }
Shaker Daibes40fb4fc2017-01-29 18:56:18 +02001755
1756 err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
1757 if (err) {
1758 en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
1759 dev->mtu, priv->port, err);
1760 goto tx_err;
1761 }
1762
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001763 /* Set default qp number */
1764 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1765 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001766 en_err(priv, "Failed setting default qp numbers\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001767 goto tx_err;
1768 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001769
Or Gerlitz837052d2013-12-23 16:09:44 +02001770 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02001771 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
Or Gerlitz837052d2013-12-23 16:09:44 +02001772 if (err) {
1773 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1774 err);
1775 goto tx_err;
1776 }
1777 }
1778
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001779 /* Init port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001780 en_dbg(HW, priv, "Initializing port\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001781 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1782 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001783 en_err(priv, "Failed Initializing port\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001784 goto tx_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001785 }
1786
Ido Shamayba4b87ae2015-10-08 17:14:01 +03001787 /* Set Unicast and VXLAN steering rules */
1788 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
1789 mlx4_en_set_rss_steer_rules(priv))
1790 mlx4_warn(mdev, "Failed setting steering rules\n");
1791
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001792 /* Attach rx QP to broadcast address */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001793 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001794 mc_list[5] = priv->port; /* needed for B0 steering support */
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001795 if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001796 priv->port, 0, MLX4_PROT_ETH,
1797 &priv->broadcast_id))
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001798 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1799
Herbert Xub5845f92011-03-27 01:01:26 +00001800 /* Must redo promiscuous mode setup. */
1801 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1802
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001803 /* Schedule multicast task to populate multicast list */
Yan Burman0eb74fd2013-02-07 02:25:23 +00001804 queue_work(mdev->workqueue, &priv->rx_mode_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001805
Or Gerlitz9737c6a2014-11-18 17:51:27 +02001806 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Alexander Duycka8312742016-06-16 12:22:30 -07001807 udp_tunnel_get_rx_info(dev);
1808
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001809 priv->port_up = true;
Erez Shitrit8d59de82016-10-27 16:27:17 +03001810
1811 /* Process all completions if exist to prevent
1812 * the queues freezing if they are full
1813 */
Eric Dumazet8cf699e2017-01-13 08:39:24 -08001814 for (i = 0; i < priv->rx_ring_num; i++) {
1815 local_bh_disable();
Erez Shitrit8d59de82016-10-27 16:27:17 +03001816 napi_schedule(&priv->rx_cq[i]->napi);
Eric Dumazet8cf699e2017-01-13 08:39:24 -08001817 local_bh_enable();
1818 }
Erez Shitrit8d59de82016-10-27 16:27:17 +03001819
Yevgeny Petrilina11faac2009-06-20 22:15:46 +00001820 netif_tx_start_all_queues(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001821 netif_device_attach(dev);
1822
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001823 return 0;
1824
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001825tx_err:
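 /* t and i record how far the activation loops got; unwind the
 * partially activated type first, then every earlier type in full. */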
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001826 if (t == MLX4_EN_NUM_TX_TYPES) {
1827 t--;
1828 i = priv->tx_ring_num[t];
1829 }
1830 while (t >= 0) {
1831 while (i--) {
1832 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1833 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1834 }
1835 if (!t--)
1836 break;
1837 i = priv->tx_ring_num[t];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001838 }
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001839 mlx4_en_destroy_drop_qp(priv);
1840rss_err:
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001841 mlx4_en_release_rss_steer(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001842mac_err:
Yan Burman16a10ff2013-02-07 02:25:22 +00001843 mlx4_en_put_qp(priv);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001844cq_err:
Yuval Atias9e311e72014-06-09 10:24:39 +03001845 while (rx_index--) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001846 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
Benjamin Poirierf94813f2015-04-29 15:59:35 -07001847 mlx4_en_free_affinity_hint(priv, rx_index);
Yuval Atias9e311e72014-06-09 10:24:39 +03001848 }
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001849 for (i = 0; i < priv->rx_ring_num; i++)
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001850 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001851
1852 return err; /* need to close devices */
1853}
1854
1855
Amir Vadai3484aac2013-01-30 23:07:11 +00001856void mlx4_en_stop_port(struct net_device *dev, int detach)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001857{
1858 struct mlx4_en_priv *priv = netdev_priv(dev);
1859 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001860 struct mlx4_en_mc_list *mclist, *tmp;
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001861 struct ethtool_flow_id *flow, *tmp_flow;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001862 int i, t;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001863 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001864
1865 if (!priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001866 en_dbg(DRV, priv, "stop port called while port already down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001867 return;
1868 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001869
Eugenia Emantayev0cc5c8b2013-06-25 12:09:33 +03001870 /* Close port */
1871 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1872
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001873 /* Synchronize with tx routine */
1874 netif_tx_lock_bh(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001875 if (detach)
1876 netif_device_detach(dev);
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001877 netif_tx_stop_all_queues(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001878 netif_tx_unlock_bh(dev);
1879
Amir Vadai3484aac2013-01-30 23:07:11 +00001880 netif_tx_disable(dev);
1881
Eric Dumazet7f7bf162016-12-01 05:02:06 -08001882 spin_lock_bh(&priv->stats_lock);
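 /* Fold the per-ring SW counters into dev->stats while the rings
 * are still allocated. */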
1883 mlx4_en_fold_software_stats(dev);
Yevgeny Petrilin7c287382010-08-24 03:45:45 +00001884 /* Set port as not active */
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001885 priv->port_up = false;
Eric Dumazet7f7bf162016-12-01 05:02:06 -08001886 spin_unlock_bh(&priv->stats_lock);
1887
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03001888 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001889
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001890 /* Promiscuous mode */
1891 if (mdev->dev->caps.steering_mode ==
1892 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1893 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1894 MLX4_EN_FLAG_MC_PROMISC);
1895 mlx4_flow_steer_promisc_remove(mdev->dev,
1896 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001897 MLX4_FS_ALL_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001898 mlx4_flow_steer_promisc_remove(mdev->dev,
1899 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001900 MLX4_FS_MC_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001901 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1902 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1903
 1904 /* Disable promiscuous mode */
1905 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1906 priv->port);
1907
1908 /* Disable Multicast promisc */
1909 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1910 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1911 priv->port);
1912 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1913 }
1914 }
1915
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001916 /* Detach all multicast addresses */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001917 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001918 mc_list[5] = priv->port; /* needed for B0 steering support */
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001919 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001920 MLX4_PROT_ETH, priv->broadcast_id);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001921 list_for_each_entry(mclist, &priv->curr_list, list) {
1922 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001923 mc_list[5] = priv->port;
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001924 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001925 mc_list, MLX4_PROT_ETH, mclist->reg_id);
Or Gerlitzde123262014-03-13 14:52:15 +02001926 if (mclist->tunnel_reg_id)
1927 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001928 }
1929 mlx4_en_clear_list(dev);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001930 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1931 list_del(&mclist->list);
1932 kfree(mclist);
1933 }
1934
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001935 /* Flush multicast filter */
1936 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1937
Hadar Hen Zion6efb5fa2013-03-21 05:55:53 +00001938 /* Remove flow steering rules for the port */
1939 if (mdev->dev->caps.steering_mode ==
1940 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1941 ASSERT_RTNL();
1942 list_for_each_entry_safe(flow, tmp_flow,
1943 &priv->ethtool_list, list) {
1944 mlx4_flow_detach(mdev->dev, flow->id);
1945 list_del(&flow->list);
1946 }
1947 }
1948
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001949 mlx4_en_destroy_drop_qp(priv);
1950
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001951 /* Free TX Rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001952 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
1953 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1954 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1955 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1956 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001957 }
1958 msleep(10);
1959
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001960 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
1961 for (i = 0; i < priv->tx_ring_num[t]; i++)
1962 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001963
Ido Shamayba4b87ae2015-10-08 17:14:01 +03001964 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1965 mlx4_en_delete_rss_steer_rules(priv);
1966
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001967 /* Free RSS qps */
1968 mlx4_en_release_rss_steer(priv);
1969
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001970 /* Unregister MAC address for the port */
Yan Burman16a10ff2013-02-07 02:25:22 +00001971 mlx4_en_put_qp(priv);
Or Gerlitz5930e8d2013-10-15 16:55:22 +02001972 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
Matan Barak955154f2013-01-30 23:07:10 +00001973 mdev->mac_removed[priv->port] = 1;
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001974
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001975 /* Free RX Rings */
1976 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001977 struct mlx4_en_cq *cq = priv->rx_cq[i];
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001978
Ido Shamayf4a36752014-10-27 11:37:45 +02001979 napi_synchronize(&cq->napi);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001980 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001981 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001982
1983 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001984 }
1985}
1986
1987static void mlx4_en_restart(struct work_struct *work)
1988{
1989 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1990 watchdog_task);
1991 struct mlx4_en_dev *mdev = priv->mdev;
1992 struct net_device *dev = priv->dev;
1993
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001994 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001995
Hannes Frederic Sowa0c5c3252016-04-18 21:19:44 +02001996 rtnl_lock();
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001997 mutex_lock(&mdev->state_lock);
1998 if (priv->port_up) {
Amir Vadai3484aac2013-01-30 23:07:11 +00001999 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00002000 if (mlx4_en_start_port(dev))
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002001 en_err(priv, "Failed restarting port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00002002 }
2003 mutex_unlock(&mdev->state_lock);
Hannes Frederic Sowa0c5c3252016-04-18 21:19:44 +02002004 rtnl_unlock();
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002005}
2006
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002007static void mlx4_en_clear_stats(struct net_device *dev)
2008{
2009 struct mlx4_en_priv *priv = netdev_priv(dev);
2010 struct mlx4_en_dev *mdev = priv->mdev;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002011 struct mlx4_en_tx_ring **tx_ring;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002012 int i;
2013
Tariq Toukaneb4b6782016-10-27 16:27:22 +03002014 if (!mlx4_is_slave(mdev->dev))
2015 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
2016 en_dbg(HW, priv, "Failed dumping statistics\n");
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002017
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002018 memset(&priv->pstats, 0, sizeof(priv->pstats));
2019 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
2020 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
Matan Barak0b131562015-03-30 17:45:25 +03002021 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
2022 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
2023 memset(&priv->rx_priority_flowstats, 0,
2024 sizeof(priv->rx_priority_flowstats));
2025 memset(&priv->tx_priority_flowstats, 0,
2026 sizeof(priv->tx_priority_flowstats));
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03002027 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002028
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002029 tx_ring = priv->tx_ring[TX];
2030 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
2031 tx_ring[i]->bytes = 0;
2032 tx_ring[i]->packets = 0;
2033 tx_ring[i]->tx_csum = 0;
2034 tx_ring[i]->tx_dropped = 0;
2035 tx_ring[i]->queue_stopped = 0;
2036 tx_ring[i]->wake_queue = 0;
2037 tx_ring[i]->tso_packets = 0;
2038 tx_ring[i]->xmit_more = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002039 }
2040 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002041 priv->rx_ring[i]->bytes = 0;
2042 priv->rx_ring[i]->packets = 0;
2043 priv->rx_ring[i]->csum_ok = 0;
2044 priv->rx_ring[i]->csum_none = 0;
Shani Michaelif8c64552014-11-09 13:51:53 +02002045 priv->rx_ring[i]->csum_complete = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002046 }
2047}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002048
2049static int mlx4_en_open(struct net_device *dev)
2050{
2051 struct mlx4_en_priv *priv = netdev_priv(dev);
2052 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002053 int err = 0;
2054
2055 mutex_lock(&mdev->state_lock);
2056
2057 if (!mdev->device_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002058 en_err(priv, "Cannot open - device down/disabled\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002059 err = -EBUSY;
2060 goto out;
2061 }
2062
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002063 /* Reset HW statistics and SW counters */
2064 mlx4_en_clear_stats(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002065
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002066 err = mlx4_en_start_port(dev);
2067 if (err)
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002068 en_err(priv, "Failed starting port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002069
2070out:
2071 mutex_unlock(&mdev->state_lock);
2072 return err;
2073}
2074
2075
2076static int mlx4_en_close(struct net_device *dev)
2077{
2078 struct mlx4_en_priv *priv = netdev_priv(dev);
2079 struct mlx4_en_dev *mdev = priv->mdev;
2080
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002081 en_dbg(IFDOWN, priv, "Close port called\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002082
2083 mutex_lock(&mdev->state_lock);
2084
Amir Vadai3484aac2013-01-30 23:07:11 +00002085 mlx4_en_stop_port(dev, 0);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002086 netif_carrier_off(dev);
2087
2088 mutex_unlock(&mdev->state_lock);
2089 return 0;
2090}
2091
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002092static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002093{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002094 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002095
Amir Vadai1eb8c692012-07-18 22:33:52 +00002096#ifdef CONFIG_RFS_ACCEL
Amir Vadai1eb8c692012-07-18 22:33:52 +00002097 priv->dev->rx_cpu_rmap = NULL;
2098#endif
2099
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002100 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2101 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2102 if (priv->tx_ring[t] && priv->tx_ring[t][i])
2103 mlx4_en_destroy_tx_ring(priv,
2104 &priv->tx_ring[t][i]);
2105 if (priv->tx_cq[t] && priv->tx_cq[t][i])
2106 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2107 }
Martin KaFai Lauf32b20e82017-01-31 22:35:32 -08002108 kfree(priv->tx_ring[t]);
2109 kfree(priv->tx_cq[t]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002110 }
2111
2112 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002113 if (priv->rx_ring[i])
Thadeu Lima de Souza Cascardo68355f72012-02-06 08:39:49 +00002114 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2115 priv->prof->rx_ring_size, priv->stride);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002116 if (priv->rx_cq[i])
Alexander Gullerfe0af032011-10-09 05:26:46 +00002117 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002118 }
Yevgeny Petrilin044ca2a2012-06-25 00:24:13 +00002119
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002120}
2121
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002122static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002123{
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002124 struct mlx4_en_port_profile *prof = priv->prof;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002125 int i, t;
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002126 int node;
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +00002127
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002128 /* Create tx Rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002129 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2130 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2131 node = cpu_to_node(i % num_online_cpus());
2132 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
2133 prof->tx_ring_size, i, t, node))
2134 goto err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002135
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002136 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
2137 prof->tx_ring_size,
2138 TXBB_SIZE, node, i))
2139 goto err;
2140 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002141 }
2142
2143 /* Create rx Rings */
2144 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002145 node = cpu_to_node(i % num_online_cpus());
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002146 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002147 prof->rx_ring_size, i, RX, node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002148 goto err;
2149
2150 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002151 prof->rx_ring_size, priv->stride,
2152 node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002153 goto err;
2154 }
2155
Amir Vadai1eb8c692012-07-18 22:33:52 +00002156#ifdef CONFIG_RFS_ACCEL
Matan Barakc66fa192015-05-31 09:30:16 +03002157 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
Amir Vadai1eb8c692012-07-18 22:33:52 +00002158#endif
2159
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002160 return 0;
2161
2162err:
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002163 en_err(priv, "Failed to allocate NIC resources\n");
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002164 for (i = 0; i < priv->rx_ring_num; i++) {
2165 if (priv->rx_ring[i])
2166 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2167 prof->rx_ring_size,
2168 priv->stride);
2169 if (priv->rx_cq[i])
2170 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2171 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002172 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2173 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2174 if (priv->tx_ring[t][i])
2175 mlx4_en_destroy_tx_ring(priv,
2176 &priv->tx_ring[t][i]);
2177 if (priv->tx_cq[t][i])
2178 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2179 }
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002180 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002181 return -ENOMEM;
2182}
2183
2184
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002185static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
2186 struct mlx4_en_priv *src,
2187 struct mlx4_en_port_profile *prof)
2188{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002189 int t;
2190
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002191 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
2192 sizeof(dst->hwtstamp_config));
Inbar Karmyec327f72017-06-29 14:07:57 +03002193 dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002194 dst->rx_ring_num = prof->rx_ring_num;
2195 dst->flags = prof->flags;
2196 dst->mdev = src->mdev;
2197 dst->port = src->port;
2198 dst->dev = src->dev;
2199 dst->prof = prof;
2200 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2201 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2202
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002203 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2204 dst->tx_ring_num[t] = prof->tx_ring_num[t];
2205 if (!dst->tx_ring_num[t])
2206 continue;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002207
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002208 dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
2209 MAX_TX_RINGS, GFP_KERNEL);
2210 if (!dst->tx_ring[t])
2211 goto err_free_tx;
2212
2213 dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
2214 MAX_TX_RINGS, GFP_KERNEL);
2215 if (!dst->tx_cq[t]) {
2216 kfree(dst->tx_ring[t]);
2217 goto err_free_tx;
2218 }
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002219 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002220
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002221 return 0;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002222
2223err_free_tx:
2224 while (t--) {
2225 kfree(dst->tx_ring[t]);
2226 kfree(dst->tx_cq[t]);
2227 }
2228 return -ENOMEM;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002229}
2230
2231static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
2232 struct mlx4_en_priv *src)
2233{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002234 int t;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002235 memcpy(dst->rx_ring, src->rx_ring,
2236 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
2237 memcpy(dst->rx_cq, src->rx_cq,
2238 sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
2239 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
2240 sizeof(dst->hwtstamp_config));
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002241 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2242 dst->tx_ring_num[t] = src->tx_ring_num[t];
2243 dst->tx_ring[t] = src->tx_ring[t];
2244 dst->tx_cq[t] = src->tx_cq[t];
2245 }
Inbar Karmyec327f72017-06-29 14:07:57 +03002246 dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002247 dst->rx_ring_num = src->rx_ring_num;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002248 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
2249}
2250
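/* Reconfiguration uses a two-phase replace: build a complete new set of
 * rings/CQs in a scratch priv first and only swap it in once allocation
 * has succeeded, so a failed resize leaves the running port untouched.
 * A minimal sketch of the caller pattern (locking and error handling
 * elided; "tmp" and "new_prof" are illustrative locals - see e.g.
 * mlx4_en_set_ringparam for a real caller):
 *
 *	struct mlx4_en_priv *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 *	struct mlx4_en_port_profile new_prof;
 *	int err;
 *
 *	memcpy(&new_prof, priv->prof, sizeof(new_prof));
 *	new_prof.rx_ring_size = new_size;
 *	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 *	if (!err) {
 *		if (priv->port_up)
 *			mlx4_en_stop_port(dev, 1);
 *		mlx4_en_safe_replace_resources(priv, tmp);
 *		if (priv->port_up)
 *			err = mlx4_en_start_port(dev);
 *	}
 *	kfree(tmp);
 */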
2251int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2252 struct mlx4_en_priv *tmp,
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002253 struct mlx4_en_port_profile *prof,
2254 bool carry_xdp_prog)
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002255{
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002256 struct bpf_prog *xdp_prog;
2257 int i, t;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002258
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002259 mlx4_en_copy_priv(tmp, priv, prof);
2260
2261 if (mlx4_en_alloc_resources(tmp)) {
2262 en_warn(priv,
2263 "%s: Resource allocation failed, using previous configuration\n",
2264 __func__);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002265 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2266 kfree(tmp->tx_ring[t]);
2267 kfree(tmp->tx_cq[t]);
2268 }
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002269 return -ENOMEM;
2270 }
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002271
 2272 /* All rx_rings have the same xdp_prog. Pick the first one. */
2273 xdp_prog = rcu_dereference_protected(
2274 priv->rx_ring[0]->xdp_prog,
2275 lockdep_is_held(&priv->mdev->state_lock));
2276
2277 if (xdp_prog && carry_xdp_prog) {
2278 xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
2279 if (IS_ERR(xdp_prog)) {
2280 mlx4_en_free_resources(tmp);
2281 return PTR_ERR(xdp_prog);
2282 }
2283 for (i = 0; i < tmp->rx_ring_num; i++)
2284 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
2285 xdp_prog);
2286 }
2287
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002288 return 0;
2289}
2290
2291void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
2292 struct mlx4_en_priv *tmp)
2293{
2294 mlx4_en_free_resources(priv);
2295 mlx4_en_update_priv(priv, tmp);
2296}
2297
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002298void mlx4_en_destroy_netdev(struct net_device *dev)
2299{
2300 struct mlx4_en_priv *priv = netdev_priv(dev);
2301 struct mlx4_en_dev *mdev = priv->mdev;
2302
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002303 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002304
2305 /* Unregister device - this will close the port if it was up */
Jiri Pirko09d4d082016-02-26 17:32:24 +01002306 if (priv->registered) {
2307 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2308 priv->port));
Tariq Toukanb4353702016-11-27 19:20:51 +02002309 unregister_netdev(dev);
Jiri Pirko09d4d082016-02-26 17:32:24 +01002310 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002311
2312 if (priv->allocated)
2313 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2314
2315 cancel_delayed_work(&priv->stats_task);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00002316 cancel_delayed_work(&priv->service_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002317 /* flush any pending task for this netdev */
2318 flush_workqueue(mdev->workqueue);
2319
Eugenia Emantayev90683062015-12-17 15:35:38 +02002320 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2321 mlx4_en_remove_timestamp(mdev);
2322
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002323 /* Detach the netdev so tasks do not attempt to access it */
2324 mutex_lock(&mdev->state_lock);
2325 mdev->pndev[priv->port] = NULL;
Moni Shoua5da03542015-02-03 16:48:34 +02002326 mdev->upper[priv->port] = NULL;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002327
Eugenia Emantayev30f56e32016-07-18 18:35:11 +03002328#ifdef CONFIG_RFS_ACCEL
2329 mlx4_en_cleanup_filters(priv);
2330#endif
2331
Alexander Gullerfe0af032011-10-09 05:26:46 +00002332 mlx4_en_free_resources(priv);
Tariq Toukanb6e01232016-11-22 16:20:39 +02002333 mutex_unlock(&mdev->state_lock);
Amir Vadai564c2742012-04-04 21:33:26 +00002334
Tariq Toukanb4353702016-11-27 19:20:51 +02002335 free_netdev(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002336}
2337
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002338static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
2339{
2340 struct mlx4_en_priv *priv = netdev_priv(dev);
2341
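 /* With an XDP program attached, each frame plus XDP_PACKET_HEADROOM
 * must fit in a single page, which caps the supported MTU. */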
2342 if (mtu > MLX4_EN_MAX_XDP_MTU) {
2343 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
2344 mtu, MLX4_EN_MAX_XDP_MTU);
2345 return false;
2346 }
2347
2348 return true;
2349}
2350
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002351static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2352{
2353 struct mlx4_en_priv *priv = netdev_priv(dev);
2354 struct mlx4_en_dev *mdev = priv->mdev;
2355 int err = 0;
2356
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002357 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002358 dev->mtu, new_mtu);
2359
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002360 if (priv->tx_ring_num[TX_XDP] &&
2361 !mlx4_en_check_xdp_mtu(dev, new_mtu))
Martin KaFai Lau9f9b74e2017-01-10 09:41:49 -08002362 return -EOPNOTSUPP;
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002363
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002364 dev->mtu = new_mtu;
2365
2366 if (netif_running(dev)) {
2367 mutex_lock(&mdev->state_lock);
2368 if (!mdev->device_up) {
2369 /* NIC is probably restarting - let watchdog task reset
2370 * the port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002371 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002372 } else {
Amir Vadai3484aac2013-01-30 23:07:11 +00002373 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002374 err = mlx4_en_start_port(dev);
2375 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002376 en_err(priv, "Failed restarting port:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002377 priv->port);
2378 queue_work(mdev->workqueue, &priv->watchdog_task);
2379 }
2380 }
2381 mutex_unlock(&mdev->state_lock);
2382 }
2383 return 0;
2384}
2385
Ben Hutchings100dbda2013-11-18 23:13:31 +00002386static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
Amir Vadaiec693d42013-04-23 06:06:49 +00002387{
2388 struct mlx4_en_priv *priv = netdev_priv(dev);
2389 struct mlx4_en_dev *mdev = priv->mdev;
2390 struct hwtstamp_config config;
2391
2392 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2393 return -EFAULT;
2394
2395 /* reserved for future extensions */
2396 if (config.flags)
2397 return -EINVAL;
2398
2399 /* device doesn't support time stamping */
2400 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2401 return -EINVAL;
2402
2403 /* TX HW timestamp */
2404 switch (config.tx_type) {
2405 case HWTSTAMP_TX_OFF:
2406 case HWTSTAMP_TX_ON:
2407 break;
2408 default:
2409 return -ERANGE;
2410 }
2411
2412 /* RX HW timestamp */
2413 switch (config.rx_filter) {
2414 case HWTSTAMP_FILTER_NONE:
2415 break;
2416 case HWTSTAMP_FILTER_ALL:
2417 case HWTSTAMP_FILTER_SOME:
2418 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2419 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2420 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2421 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2422 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2423 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2424 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2425 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2426 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2427 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2428 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2429 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Miroslav Lichvare3412572017-05-19 17:52:36 +02002430 case HWTSTAMP_FILTER_NTP_ALL:
Amir Vadaiec693d42013-04-23 06:06:49 +00002431 config.rx_filter = HWTSTAMP_FILTER_ALL;
2432 break;
2433 default:
2434 return -ERANGE;
2435 }
2436
Saeed Mahameed7787fa62014-10-27 11:37:42 +02002437 if (mlx4_en_reset_config(dev, config, dev->features)) {
Amir Vadaiec693d42013-04-23 06:06:49 +00002438 config.tx_type = HWTSTAMP_TX_OFF;
2439 config.rx_filter = HWTSTAMP_FILTER_NONE;
2440 }
2441
2442 return copy_to_user(ifr->ifr_data, &config,
2443 sizeof(config)) ? -EFAULT : 0;
2444}
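/* A minimal userspace sketch of exercising this path through the
 * standard SIOCSHWTSTAMP ioctl ("eth0" and fd are illustrative; fd is
 * any open AF_INET socket):
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
 *		perror("SIOCSHWTSTAMP");
 */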
2445
Ben Hutchings100dbda2013-11-18 23:13:31 +00002446static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2447{
2448 struct mlx4_en_priv *priv = netdev_priv(dev);
2449
2450 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2451 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2452}
2453
Amir Vadaiec693d42013-04-23 06:06:49 +00002454static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2455{
2456 switch (cmd) {
2457 case SIOCSHWTSTAMP:
Ben Hutchings100dbda2013-11-18 23:13:31 +00002458 return mlx4_en_hwtstamp_set(dev, ifr);
2459 case SIOCGHWTSTAMP:
2460 return mlx4_en_hwtstamp_get(dev, ifr);
Amir Vadaiec693d42013-04-23 06:06:49 +00002461 default:
2462 return -EOPNOTSUPP;
2463 }
2464}
2465
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002466static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2467 netdev_features_t features)
2468{
2469 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2470 struct mlx4_en_dev *mdev = en_priv->mdev;
2471
2472 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
 2473 * enable/disable, make sure the S-TAG flag is always in the same state as
2474 * C-TAG.
2475 */
2476 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2477 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2478 features |= NETIF_F_HW_VLAN_STAG_RX;
2479 else
2480 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2481
2482 return features;
2483}
2484
Amir Vadai60d6fe92011-11-26 19:55:19 +00002485static int mlx4_en_set_features(struct net_device *netdev,
2486 netdev_features_t features)
2487{
2488 struct mlx4_en_priv *priv = netdev_priv(netdev);
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002489 bool reset = false;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002490 int ret = 0;
2491
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002492 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2493 en_info(priv, "Turn %s RX-FCS\n",
2494 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2495 reset = true;
2496 }
2497
Muhammad Mahajna78500b82015-04-02 16:31:22 +03002498 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2499 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2500
2501 en_info(priv, "Turn %s RX-ALL\n",
2502 ignore_fcs_value ? "ON" : "OFF");
2503 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2504 priv->port, ignore_fcs_value);
2505 if (ret)
2506 return ret;
2507 }
2508
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002509 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2510 en_info(priv, "Turn %s RX vlan strip offload\n",
2511 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002512 reset = true;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002513 }
Amir Vadai60d6fe92011-11-26 19:55:19 +00002514
Ido Shamaycfb53f32015-02-03 17:57:21 +02002515 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2516 en_info(priv, "Turn %s TX vlan strip offload\n",
2517 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2518
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002519 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2520 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2521 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2522
Ido Shamay241a08c2015-04-02 16:31:07 +03002523 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2524 en_info(priv, "Turn %s loopback\n",
2525 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2526 mlx4_en_update_loopback_state(netdev, features);
2527 }
Yan Burman79aeacc2013-02-07 02:25:19 +00002528
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002529 if (reset) {
2530 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2531 features);
2532 if (ret)
2533 return ret;
2534 }
Amir Vadai60d6fe92011-11-26 19:55:19 +00002535
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002536 return 0;
Amir Vadai60d6fe92011-11-26 19:55:19 +00002537}
2538
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002539static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2540{
2541 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2542 struct mlx4_en_dev *mdev = en_priv->mdev;
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002543
Eugenia Emantayev745d8ae2017-02-23 12:02:42 +02002544 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002545}
2546
Moshe Shemesh79aab092016-09-22 12:11:15 +03002547static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
2548 __be16 vlan_proto)
Rony Efraim3f7fb022013-04-25 05:22:28 +00002549{
2550 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2551 struct mlx4_en_dev *mdev = en_priv->mdev;
2552
Moshe Shemeshb42959d2016-09-22 12:11:16 +03002553 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
2554 vlan_proto);
Rony Efraim3f7fb022013-04-25 05:22:28 +00002555}
2556
Ido Shamaycda373f2015-04-02 16:31:16 +03002557static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2558 int max_tx_rate)
2559{
2560 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2561 struct mlx4_en_dev *mdev = en_priv->mdev;
2562
2563 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2564 max_tx_rate);
2565}
2566
Rony Efraime6b6a232013-04-25 05:22:29 +00002567static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2568{
2569 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2570 struct mlx4_en_dev *mdev = en_priv->mdev;
2571
2572 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2573}
2574
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002575static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2576{
2577 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2578 struct mlx4_en_dev *mdev = en_priv->mdev;
2579
2580 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2581}
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002582
Rony Efraim948e3062013-06-13 13:19:11 +03002583static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2584{
2585 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2586 struct mlx4_en_dev *mdev = en_priv->mdev;
2587
2588 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2589}
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002590
Eran Ben Elisha62a89052015-06-15 17:59:08 +03002591static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2592 struct ifla_vf_stats *vf_stats)
2593{
2594 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2595 struct mlx4_en_dev *mdev = en_priv->mdev;
2596
2597 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2598}
2599
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002600#define PORT_ID_BYTE_LEN 8
2601static int mlx4_en_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01002602 struct netdev_phys_item_id *ppid)
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002603{
2604 struct mlx4_en_priv *priv = netdev_priv(dev);
2605 struct mlx4_dev *mdev = priv->mdev->dev;
2606 int i;
2607 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2608
2609 if (!phys_port_id)
2610 return -EOPNOTSUPP;
2611
2612 ppid->id_len = sizeof(phys_port_id);
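 /* Emit the 64-bit port ID most-significant byte first (big-endian). */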
2613 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2614 ppid->id[i] = phys_port_id & 0xff;
2615 phys_port_id >>= 8;
2616 }
2617 return 0;
2618}
2619
Or Gerlitz1b136de2014-03-27 14:02:04 +02002620static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2621{
2622 int ret;
2623 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2624 vxlan_add_task);
2625
2626 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2627 if (ret)
2628 goto out;
2629
2630 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2631 VXLAN_STEER_BY_OUTER_MAC, 1);
2632out:
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002633 if (ret) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02002634 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002635 return;
2636 }
2637
2638 /* set offloads */
Alexander Duyck09067122016-05-02 09:38:37 -07002639 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2640 NETIF_F_RXCSUM |
2641 NETIF_F_TSO | NETIF_F_TSO6 |
2642 NETIF_F_GSO_UDP_TUNNEL |
Alexander Duyck3c9346b2016-05-02 09:38:30 -07002643 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2644 NETIF_F_GSO_PARTIAL;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002645}
2646
2647static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2648{
2649 int ret;
2650 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2651 vxlan_del_task);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002652 /* unset offloads */
Alexander Duyck09067122016-05-02 09:38:37 -07002653 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2654 NETIF_F_RXCSUM |
2655 NETIF_F_TSO | NETIF_F_TSO6 |
2656 NETIF_F_GSO_UDP_TUNNEL |
Alexander Duyck3c9346b2016-05-02 09:38:30 -07002657 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2658 NETIF_F_GSO_PARTIAL);
Or Gerlitz1b136de2014-03-27 14:02:04 +02002659
2660 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2661 VXLAN_STEER_BY_OUTER_MAC, 0);
2662 if (ret)
2663 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2664
2665 priv->vxlan_port = 0;
2666}
2667
2668static void mlx4_en_add_vxlan_port(struct net_device *dev,
Alexander Duycka8312742016-06-16 12:22:30 -07002669 struct udp_tunnel_info *ti)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002670{
2671 struct mlx4_en_priv *priv = netdev_priv(dev);
Alexander Duycka8312742016-06-16 12:22:30 -07002672 __be16 port = ti->port;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002673 __be16 current_port;
2674
Alexander Duycka8312742016-06-16 12:22:30 -07002675 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002676 return;
2677
Alexander Duycka8312742016-06-16 12:22:30 -07002678 if (ti->sa_family != AF_INET)
2679 return;
2680
2681 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002682 return;
2683
2684 current_port = priv->vxlan_port;
2685 if (current_port && current_port != port) {
2686 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2687 ntohs(current_port), ntohs(port));
2688 return;
2689 }
2690
2691 priv->vxlan_port = port;
2692 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2693}
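
/* Illustrative trigger (assumes a standard iproute2): bringing up a
 * VXLAN device on top of this port, e.g.
 *	ip link add vxlan0 type vxlan id 42 dev <netdev> dstport 4789
 * makes the UDP tunnel core call ndo_udp_tunnel_add with
 * UDP_TUNNEL_TYPE_VXLAN and the chosen destination port.
 */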
2694
2695static void mlx4_en_del_vxlan_port(struct net_device *dev,
Alexander Duycka8312742016-06-16 12:22:30 -07002696 struct udp_tunnel_info *ti)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002697{
2698 struct mlx4_en_priv *priv = netdev_priv(dev);
Alexander Duycka8312742016-06-16 12:22:30 -07002699 __be16 port = ti->port;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002700 __be16 current_port;
2701
Alexander Duycka8312742016-06-16 12:22:30 -07002702 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002703 return;
2704
Alexander Duycka8312742016-06-16 12:22:30 -07002705 if (ti->sa_family != AF_INET)
2706 return;
2707
2708 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002709 return;
2710
2711 current_port = priv->vxlan_port;
2712 if (current_port != port) {
2713 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2714 return;
2715 }
2716
2717 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2718}
Joe Stringer956bdab2014-11-13 16:38:14 -08002719
Jesse Gross5f352272014-12-23 22:37:26 -08002720static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2721 struct net_device *dev,
2722 netdev_features_t features)
Joe Stringer956bdab2014-11-13 16:38:14 -08002723{
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002724 features = vlan_features_check(skb, features);
Alexander Duyck09067122016-05-02 09:38:37 -07002725 features = vxlan_features_check(skb, features);
2726
2727	/* The ConnectX-3 doesn't support outer IPv6 checksums, but it does
2728	 * support inner IPv6 checksums and segmentation, so we need to
2729	 * strip those features if this is an IPv6 encapsulated frame.
2730 */
2731 if (skb->encapsulation &&
Alexander Duycka5472242016-06-15 14:42:11 -07002732 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2733 struct mlx4_en_priv *priv = netdev_priv(dev);
2734
2735 if (!priv->vxlan_port ||
2736 (ip_hdr(skb)->version != 4) ||
2737 (udp_hdr(skb)->dest != priv->vxlan_port))
2738 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2739 }
Alexander Duyck09067122016-05-02 09:38:37 -07002740
2741 return features;
Joe Stringer956bdab2014-11-13 16:38:14 -08002742}
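
/* Note (informational): once the check above clears NETIF_F_CSUM_MASK
 * and NETIF_F_GSO_MASK for an skb, the stack falls back to software
 * checksumming and segmentation for it, e.g. for a UDP tunnel using a
 * port other than the single offloaded VXLAN port.
 */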
Or Gerlitz1b136de2014-03-27 14:02:04 +02002743
Wu Fengguangde1cf8a2015-03-19 08:51:27 +08002744static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002745{
2746 struct mlx4_en_priv *priv = netdev_priv(dev);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002747 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002748 struct mlx4_update_qp_params params;
2749 int err;
2750
2751 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2752 return -EOPNOTSUPP;
2753
2754	/* Rate is provided to us in Mb/s; if it doesn't fit into 12 bits, program it in Gb/s */
2755 if (maxrate >> 12) {
2756 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2757 params.rate_val = maxrate / 1000;
2758 } else if (maxrate) {
2759 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2760 params.rate_val = maxrate;
2761 } else { /* zero serves to revoke the QP rate-limitation */
2762 params.rate_unit = 0;
2763 params.rate_val = 0;
2764 }
2765
2766 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2767 &params);
2768 return err;
2769}
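
/* Usage sketch (assumes the standard per-queue sysfs attribute): the
 * stack calls ndo_set_tx_maxrate when user space writes a Mb/s value
 * to /sys/class/net/<netdev>/queues/tx-<n>/tx_maxrate, e.g.
 *	echo 5000 > /sys/class/net/eth2/queues/tx-0/tx_maxrate
 * 5000 Mb/s does not fit in 12 bits, so it is programmed as 5 Gb/s.
 */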
2770
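/* Program lifetime note (informational): the prog passed in already
 * carries one reference taken by the core; bpf_prog_add(prog,
 * rx_ring_num - 1) below raises the count to one reference per RX
 * ring, and each ring drops its reference with bpf_prog_put() when the
 * program is replaced or the rings are torn down.
 */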
Brenden Blanco47a38e12016-07-19 12:16:50 -07002771static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2772{
2773 struct mlx4_en_priv *priv = netdev_priv(dev);
Brenden Blancod576acf2016-07-19 12:16:52 -07002774 struct mlx4_en_dev *mdev = priv->mdev;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002775 struct mlx4_en_port_profile new_prof;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002776 struct bpf_prog *old_prog;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002777 struct mlx4_en_priv *tmp;
2778 int tx_changed = 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002779 int xdp_ring_num;
Brenden Blancod576acf2016-07-19 12:16:52 -07002780 int port_up = 0;
2781 int err;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002782 int i;
2783
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002784 xdp_ring_num = prog ? priv->rx_ring_num : 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002785
Brenden Blancod576acf2016-07-19 12:16:52 -07002786 /* No need to reconfigure buffers when simply swapping the
2787 * program for a new one.
2788 */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002789 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
Brenden Blancod576acf2016-07-19 12:16:52 -07002790 if (prog) {
2791 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2792 if (IS_ERR(prog))
2793 return PTR_ERR(prog);
2794 }
Brenden Blanco326fe022016-09-03 21:29:58 -07002795 mutex_lock(&mdev->state_lock);
Brenden Blancod576acf2016-07-19 12:16:52 -07002796 for (i = 0; i < priv->rx_ring_num; i++) {
Brenden Blanco326fe022016-09-03 21:29:58 -07002797 old_prog = rcu_dereference_protected(
2798 priv->rx_ring[i]->xdp_prog,
2799 lockdep_is_held(&mdev->state_lock));
2800 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
Brenden Blancod576acf2016-07-19 12:16:52 -07002801 if (old_prog)
2802 bpf_prog_put(old_prog);
2803 }
Brenden Blanco326fe022016-09-03 21:29:58 -07002804 mutex_unlock(&mdev->state_lock);
Brenden Blancod576acf2016-07-19 12:16:52 -07002805 return 0;
2806 }
2807
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002808 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
Brenden Blanco47a38e12016-07-19 12:16:50 -07002809 return -EOPNOTSUPP;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002810
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002811 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2812 if (!tmp)
2813 return -ENOMEM;
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07002814
Brenden Blanco47a38e12016-07-19 12:16:50 -07002815 if (prog) {
2816 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002817 if (IS_ERR(prog)) {
2818 err = PTR_ERR(prog);
2819 goto out;
2820 }
Brenden Blanco47a38e12016-07-19 12:16:50 -07002821 }
2822
Brenden Blancod576acf2016-07-19 12:16:52 -07002823 mutex_lock(&mdev->state_lock);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002824 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
2825 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
2826
2827 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
2828 tx_changed = 1;
2829 new_prof.tx_ring_num[TX] =
Inbar Karmyf21ad612017-06-29 14:07:56 +03002830 MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002831		en_warn(priv, "Reducing the number of TX rings so as not to exceed the maximum total number of rings\n");
2832 }
2833
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002834 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
Daniel Borkmannc5405942016-11-09 22:02:34 +01002835 if (err) {
2836 if (prog)
2837 bpf_prog_sub(prog, priv->rx_ring_num - 1);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002838 goto unlock_out;
Daniel Borkmannc5405942016-11-09 22:02:34 +01002839 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002840
Brenden Blancod576acf2016-07-19 12:16:52 -07002841 if (priv->port_up) {
2842 port_up = 1;
2843 mlx4_en_stop_port(dev, 1);
2844 }
2845
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002846 mlx4_en_safe_replace_resources(priv, tmp);
2847 if (tx_changed)
2848 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
Brenden Blanco47a38e12016-07-19 12:16:50 -07002849
Brenden Blanco47a38e12016-07-19 12:16:50 -07002850 for (i = 0; i < priv->rx_ring_num; i++) {
Brenden Blanco326fe022016-09-03 21:29:58 -07002851 old_prog = rcu_dereference_protected(
2852 priv->rx_ring[i]->xdp_prog,
2853 lockdep_is_held(&mdev->state_lock));
2854 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
Brenden Blanco47a38e12016-07-19 12:16:50 -07002855 if (old_prog)
2856 bpf_prog_put(old_prog);
2857 }
2858
Brenden Blancod576acf2016-07-19 12:16:52 -07002859 if (port_up) {
2860 err = mlx4_en_start_port(dev);
2861 if (err) {
2862 en_err(priv, "Failed starting port %d for XDP change\n",
2863 priv->port);
2864 queue_work(mdev->workqueue, &priv->watchdog_task);
2865 }
2866 }
2867
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002868unlock_out:
Brenden Blancod576acf2016-07-19 12:16:52 -07002869 mutex_unlock(&mdev->state_lock);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002870out:
2871 kfree(tmp);
2872 return err;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002873}
2874
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002875static u32 mlx4_xdp_query(struct net_device *dev)
Brenden Blanco47a38e12016-07-19 12:16:50 -07002876{
2877 struct mlx4_en_priv *priv = netdev_priv(dev);
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002878 struct mlx4_en_dev *mdev = priv->mdev;
2879 const struct bpf_prog *xdp_prog;
2880 u32 prog_id = 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002881
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002882 if (!priv->tx_ring_num[TX_XDP])
2883 return prog_id;
2884
2885 mutex_lock(&mdev->state_lock);
2886 xdp_prog = rcu_dereference_protected(
2887 priv->rx_ring[0]->xdp_prog,
2888 lockdep_is_held(&mdev->state_lock));
2889 if (xdp_prog)
2890 prog_id = xdp_prog->aux->id;
2891 mutex_unlock(&mdev->state_lock);
2892
2893 return prog_id;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002894}
2895
2896static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
2897{
2898 switch (xdp->command) {
2899 case XDP_SETUP_PROG:
2900 return mlx4_xdp_set(dev, xdp->prog);
2901 case XDP_QUERY_PROG:
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002902 xdp->prog_id = mlx4_xdp_query(dev);
2903 xdp->prog_attached = !!xdp->prog_id;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002904 return 0;
2905 default:
2906 return -EINVAL;
2907 }
2908}
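
/* Illustrative attach (assumes a standard iproute2 built with BPF
 * support):
 *	ip link set dev <netdev> xdp obj prog.o
 * lands here as XDP_SETUP_PROG; "ip link show" then reports the
 * program id returned through XDP_QUERY_PROG.
 */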
2909
Stephen Hemminger3addc562008-11-21 17:30:58 -08002910static const struct net_device_ops mlx4_netdev_ops = {
2911 .ndo_open = mlx4_en_open,
2912 .ndo_stop = mlx4_en_close,
2913 .ndo_start_xmit = mlx4_en_xmit,
Yevgeny Petrilinf813cad2009-06-01 23:24:07 +00002914 .ndo_select_queue = mlx4_en_select_queue,
Eric Dumazet9ed17db172016-05-25 09:50:38 -07002915 .ndo_get_stats64 = mlx4_en_get_stats64,
Yan Burman0eb74fd2013-02-07 02:25:23 +00002916 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002917 .ndo_set_mac_address = mlx4_en_set_mac,
Stephen Hemminger52255bb2009-01-09 10:45:37 +00002918 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002919 .ndo_change_mtu = mlx4_en_change_mtu,
Amir Vadaiec693d42013-04-23 06:06:49 +00002920 .ndo_do_ioctl = mlx4_en_ioctl,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002921 .ndo_tx_timeout = mlx4_en_tx_timeout,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002922 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2923 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2924#ifdef CONFIG_NET_POLL_CONTROLLER
2925 .ndo_poll_controller = mlx4_en_netpoll,
2926#endif
Amir Vadai60d6fe92011-11-26 19:55:19 +00002927 .ndo_set_features = mlx4_en_set_features,
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002928 .ndo_fix_features = mlx4_en_fix_features,
John Fastabende4c67342016-02-16 21:16:15 -08002929 .ndo_setup_tc = __mlx4_en_setup_tc,
Amir Vadai1eb8c692012-07-18 22:33:52 +00002930#ifdef CONFIG_RFS_ACCEL
2931 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2932#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002933 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Alexander Duycka8312742016-06-16 12:22:30 -07002934 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2935 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002936 .ndo_features_check = mlx4_en_features_check,
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002937 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Brenden Blanco47a38e12016-07-19 12:16:50 -07002938 .ndo_xdp = mlx4_xdp,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002939};
2940
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002941static const struct net_device_ops mlx4_netdev_ops_master = {
2942 .ndo_open = mlx4_en_open,
2943 .ndo_stop = mlx4_en_close,
2944 .ndo_start_xmit = mlx4_en_xmit,
2945 .ndo_select_queue = mlx4_en_select_queue,
Eric Dumazet9ed17db172016-05-25 09:50:38 -07002946 .ndo_get_stats64 = mlx4_en_get_stats64,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002947 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2948 .ndo_set_mac_address = mlx4_en_set_mac,
2949 .ndo_validate_addr = eth_validate_addr,
2950 .ndo_change_mtu = mlx4_en_change_mtu,
2951 .ndo_tx_timeout = mlx4_en_tx_timeout,
2952 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2953 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2954 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
Rony Efraim3f7fb022013-04-25 05:22:28 +00002955 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
Ido Shamaycda373f2015-04-02 16:31:16 +03002956 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
Rony Efraime6b6a232013-04-25 05:22:29 +00002957 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
Rony Efraim948e3062013-06-13 13:19:11 +03002958 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
Eran Ben Elisha62a89052015-06-15 17:59:08 +03002959 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002960 .ndo_get_vf_config = mlx4_en_get_vf_config,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002961#ifdef CONFIG_NET_POLL_CONTROLLER
2962 .ndo_poll_controller = mlx4_en_netpoll,
2963#endif
2964 .ndo_set_features = mlx4_en_set_features,
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002965 .ndo_fix_features = mlx4_en_fix_features,
John Fastabende4c67342016-02-16 21:16:15 -08002966 .ndo_setup_tc = __mlx4_en_setup_tc,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002967#ifdef CONFIG_RFS_ACCEL
2968 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2969#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002970 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Alexander Duycka8312742016-06-16 12:22:30 -07002971 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2972 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002973 .ndo_features_check = mlx4_en_features_check,
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002974 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Brenden Blanco47a38e12016-07-19 12:16:50 -07002975 .ndo_xdp = mlx4_xdp,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002976};
2977
Moni Shoua5da03542015-02-03 16:48:34 +02002978struct mlx4_en_bond {
2979 struct work_struct work;
2980 struct mlx4_en_priv *priv;
2981 int is_bonded;
2982 struct mlx4_port_map port_map;
2983};
2984
2985static void mlx4_en_bond_work(struct work_struct *work)
2986{
2987 struct mlx4_en_bond *bond = container_of(work,
2988 struct mlx4_en_bond,
2989 work);
2990 int err = 0;
2991 struct mlx4_dev *dev = bond->priv->mdev->dev;
2992
2993 if (bond->is_bonded) {
2994 if (!mlx4_is_bonded(dev)) {
2995 err = mlx4_bond(dev);
2996 if (err)
2997				en_err(bond->priv, "Failed to bond device\n");
2998 }
2999 if (!err) {
3000 err = mlx4_port_map_set(dev, &bond->port_map);
3001 if (err)
3002				en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
3003 bond->port_map.port1,
3004 bond->port_map.port2,
3005 err);
3006 }
3007 } else if (mlx4_is_bonded(dev)) {
3008 err = mlx4_unbond(dev);
3009 if (err)
3010			en_err(bond->priv, "Failed to unbond device\n");
3011 }
3012 dev_put(bond->priv->dev);
3013 kfree(bond);
3014}
3015
3016static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
3017 u8 v2p_p1, u8 v2p_p2)
3018{
3019 struct mlx4_en_bond *bond = NULL;
3020
3021 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
3022 if (!bond)
3023 return -ENOMEM;
3024
3025 INIT_WORK(&bond->work, mlx4_en_bond_work);
3026 bond->priv = priv;
3027 bond->is_bonded = is_bonded;
3028 bond->port_map.port1 = v2p_p1;
3029 bond->port_map.port2 = v2p_p2;
3030 dev_hold(priv->dev);
3031 queue_work(priv->mdev->workqueue, &bond->work);
3032 return 0;
3033}
3034
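/* Summary (informational) of the virtual-to-physical port mapping
 * computed below: in active-backup mode both virtual ports follow the
 * physical port of the active slave (v2p = {1,1} or {2,2}); in XOR /
 * 802.3ad mode each virtual port stays on its native physical port
 * while that port's slave link is usable and fails over to the other
 * physical port otherwise.
 */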
3035int mlx4_en_netdev_event(struct notifier_block *this,
3036 unsigned long event, void *ptr)
3037{
3038 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3039 u8 port = 0;
3040 struct mlx4_en_dev *mdev;
3041 struct mlx4_dev *dev;
3042 int i, num_eth_ports = 0;
3043 bool do_bond = true;
3044 struct mlx4_en_priv *priv;
3045 u8 v2p_port1 = 0;
3046 u8 v2p_port2 = 0;
3047
3048 if (!net_eq(dev_net(ndev), &init_net))
3049 return NOTIFY_DONE;
3050
3051 mdev = container_of(this, struct mlx4_en_dev, nb);
3052 dev = mdev->dev;
3053
3054	/* Go into bonded mode only when the two network devices attached to
3055	 * the two ports of the same mlx4 device are slaves of the same bonding master
3056 */
3057 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
3058 ++num_eth_ports;
3059 if (!port && (mdev->pndev[i] == ndev))
3060 port = i;
3061 mdev->upper[i] = mdev->pndev[i] ?
3062 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
3063		/* condition not met: network device is not a slave */
3064 if (!mdev->upper[i])
3065 do_bond = false;
3066 if (num_eth_ports < 2)
3067 continue;
3068 /* condition not met: same master */
3069 if (mdev->upper[i] != mdev->upper[i-1])
3070 do_bond = false;
3071 }
3072	/* condition not met: exactly 2 slaves */
3073 do_bond = (num_eth_ports == 2) ? do_bond : false;
3074
3075 /* handle only events that come with enough info */
3076 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
3077 return NOTIFY_DONE;
3078
3079 priv = netdev_priv(ndev);
3080 if (do_bond) {
3081 struct netdev_notifier_bonding_info *notifier_info = ptr;
3082 struct netdev_bonding_info *bonding_info =
3083 &notifier_info->bonding_info;
3084
3085		/* require bond mode 1 (active-backup), 2 (XOR) or 4 (802.3ad) */
3086 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
3087 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
3088 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
3089 do_bond = false;
3090
3091 /* require exactly 2 slaves */
3092 if (bonding_info->master.num_slaves != 2)
3093 do_bond = false;
3094
3095 /* calc v2p */
3096 if (do_bond) {
3097 if (bonding_info->master.bond_mode ==
3098 BOND_MODE_ACTIVEBACKUP) {
3099 /* in active-backup mode virtual ports are
3100 * mapped to the physical port of the active
3101 * slave */
3102 if (bonding_info->slave.state ==
3103 BOND_STATE_BACKUP) {
3104 if (port == 1) {
3105 v2p_port1 = 2;
3106 v2p_port2 = 2;
3107 } else {
3108 v2p_port1 = 1;
3109 v2p_port2 = 1;
3110 }
3111 } else { /* BOND_STATE_ACTIVE */
3112 if (port == 1) {
3113 v2p_port1 = 1;
3114 v2p_port2 = 1;
3115 } else {
3116 v2p_port1 = 2;
3117 v2p_port2 = 2;
3118 }
3119 }
3120 } else { /* Active-Active */
3121 /* in active-active mode a virtual port is
3122 * mapped to the native physical port if and only
3123 * if the physical port is up */
3124 __s8 link = bonding_info->slave.link;
3125
3126 if (port == 1)
3127 v2p_port2 = 2;
3128 else
3129 v2p_port1 = 1;
3130 if ((link == BOND_LINK_UP) ||
3131 (link == BOND_LINK_FAIL)) {
3132 if (port == 1)
3133 v2p_port1 = 1;
3134 else
3135 v2p_port2 = 2;
3136 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
3137 if (port == 1)
3138 v2p_port1 = 2;
3139 else
3140 v2p_port2 = 1;
3141 }
3142 }
3143 }
3144 }
3145
3146 mlx4_en_queue_bond_work(priv, do_bond,
3147 v2p_port1, v2p_port2);
3148
3149 return NOTIFY_DONE;
3150}
3151
Matan Barak0b131562015-03-30 17:45:25 +03003152void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
3153 struct mlx4_en_stats_bitmap *stats_bitmap,
3154 u8 rx_ppp, u8 rx_pause,
3155 u8 tx_ppp, u8 tx_pause)
3156{
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03003157 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
Matan Barak0b131562015-03-30 17:45:25 +03003158
3159 if (!mlx4_is_slave(dev) &&
3160 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
3161 mutex_lock(&stats_bitmap->mutex);
3162 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
3163
3164 if (rx_ppp)
3165 bitmap_set(stats_bitmap->bitmap, last_i,
3166 NUM_FLOW_PRIORITY_STATS_RX);
3167 last_i += NUM_FLOW_PRIORITY_STATS_RX;
3168
3169 if (rx_pause && !(rx_ppp))
3170 bitmap_set(stats_bitmap->bitmap, last_i,
3171 NUM_FLOW_STATS_RX);
3172 last_i += NUM_FLOW_STATS_RX;
3173
3174 if (tx_ppp)
3175 bitmap_set(stats_bitmap->bitmap, last_i,
3176 NUM_FLOW_PRIORITY_STATS_TX);
3177 last_i += NUM_FLOW_PRIORITY_STATS_TX;
3178
3179 if (tx_pause && !(tx_ppp))
3180 bitmap_set(stats_bitmap->bitmap, last_i,
3181 NUM_FLOW_STATS_TX);
3182 last_i += NUM_FLOW_STATS_TX;
3183
3184 mutex_unlock(&stats_bitmap->mutex);
3185 }
3186}
3187
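/* Layout note (informational): the bitmap below is filled in the fixed
 * order MAIN, PORT, PF, FLOW (per-priority then aggregate, RX then TX),
 * PKT, XDP. The offsets used here must stay in sync with
 * mlx4_en_update_pfc_stats_bitmap() above and with the ethtool
 * statistics strings exposed for this device.
 */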
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003188void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
Matan Barak0b131562015-03-30 17:45:25 +03003189 struct mlx4_en_stats_bitmap *stats_bitmap,
3190 u8 rx_ppp, u8 rx_pause,
3191 u8 tx_ppp, u8 tx_pause)
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003192{
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003193 int last_i = 0;
3194
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003195 mutex_init(&stats_bitmap->mutex);
3196 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003197
3198 if (mlx4_is_slave(dev)) {
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003199 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003200 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003201 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003202 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003203 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003204 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003205 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003206 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003207 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003208 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003209 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003210 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
3211 } else {
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003212 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003213 }
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003214 last_i += NUM_MAIN_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003215
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003216 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003217 last_i += NUM_PORT_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003218
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03003219 if (mlx4_is_master(dev))
3220 bitmap_set(stats_bitmap->bitmap, last_i,
3221 NUM_PF_STATS);
3222 last_i += NUM_PF_STATS;
3223
Matan Barak0b131562015-03-30 17:45:25 +03003224 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
3225 rx_ppp, rx_pause,
3226 tx_ppp, tx_pause);
3227 last_i += NUM_FLOW_STATS;
3228
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003229 if (!mlx4_is_slave(dev))
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003230 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
Tariq Toukan15fca2c2016-11-02 17:12:25 +02003231 last_i += NUM_PKT_STATS;
3232
3233 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
3234 last_i += NUM_XDP_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003235}
3236
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003237int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3238 struct mlx4_en_port_profile *prof)
3239{
3240 struct net_device *dev;
3241 struct mlx4_en_priv *priv;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003242 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003243 int err;
3244
Tom Herbertf1593d22011-01-09 19:36:36 +00003245 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
Amir Vadaid3179662012-12-02 03:49:23 +00003246 MAX_TX_RINGS, MAX_RX_RINGS);
Joe Perches41de8d42012-01-29 13:47:52 +00003247	if (!dev)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003248 return -ENOMEM;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003249
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003250 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
Amir Vadaid3179662012-12-02 03:49:23 +00003251 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
3252
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003253 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
Amir Vadai76a066f2014-02-25 18:17:51 +02003254 dev->dev_port = port - 1;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003255
3256 /*
3257 * Initialize driver private data
3258 */
3259
3260 priv = netdev_priv(dev);
3261 memset(priv, 0, sizeof(struct mlx4_en_priv));
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03003262 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003263 spin_lock_init(&priv->stats_lock);
3264 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3265 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
3266 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3267 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3268 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003269 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
3270 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003271#ifdef CONFIG_RFS_ACCEL
3272 INIT_LIST_HEAD(&priv->filters);
3273 spin_lock_init(&priv->filters_lock);
3274#endif
3275
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003276 priv->dev = dev;
3277 priv->mdev = mdev;
Yevgeny Petrilinebf8c9a2012-03-06 04:03:34 +00003278 priv->ddev = &mdev->pdev->dev;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003279 priv->prof = prof;
3280 priv->port = port;
3281 priv->port_up = false;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003282 priv->flags = prof->flags;
Amir Vadai0fef9d02014-07-22 15:44:10 +03003283 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
Amir Vadai60d6fe92011-11-26 19:55:19 +00003284 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
3285 MLX4_WQE_CTRL_SOLICITED);
Amir Vadaid3179662012-12-02 03:49:23 +00003286 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
Amir Vadaifbc6daf2014-07-08 11:28:12 +03003287 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
Eric Dumazetbd635c32014-11-22 17:24:19 -08003288 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
Amir Vadaid3179662012-12-02 03:49:23 +00003289
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003290 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
3291 priv->tx_ring_num[t] = prof->tx_ring_num[t];
3292 if (!priv->tx_ring_num[t])
3293 continue;
3294
3295 priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
3296 MAX_TX_RINGS, GFP_KERNEL);
3297 if (!priv->tx_ring[t]) {
3298 err = -ENOMEM;
3299 goto err_free_tx;
3300 }
3301 priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
3302 MAX_TX_RINGS, GFP_KERNEL);
3303 if (!priv->tx_cq[t]) {
3304 kfree(priv->tx_ring[t]);
3305 err = -ENOMEM;
3306 goto out;
3307 }
Amir Vadaibc6a4742012-05-17 00:58:10 +00003308 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003309 priv->rx_ring_num = prof->rx_ring_num;
Or Gerlitz08ff3232012-10-21 14:59:24 +00003310 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
Ido Shamayb1b6b4d2014-09-18 11:51:01 +03003311 priv->cqe_size = mdev->dev->caps.cqe_size;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003312 priv->mac_index = -1;
3313 priv->msg_enable = MLX4_EN_MSG_LEVEL;
Amir Vadai564c2742012-04-04 21:33:26 +00003314#ifdef CONFIG_MLX4_EN_DCB
Or Gerlitz540b3a32013-04-07 03:44:07 +00003315 if (!mlx4_is_slave(priv->mdev->dev)) {
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003316 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3317 DCB_CAP_DCBX_VER_IEEE;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003318 priv->flags |= MLX4_EN_DCB_ENABLED;
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003319 priv->cee_config.pfc_state = false;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003320
Inbar Karmyf21ad612017-06-29 14:07:56 +03003321 for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003322 priv->cee_config.dcb_pfc[i] = pfc_disabled;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003323
Ido Shamay3742cc62015-04-02 16:31:17 +03003324 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
Or Gerlitz540b3a32013-04-07 03:44:07 +00003325 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
3326 } else {
3327 en_info(priv, "enabling only PFC DCB ops\n");
3328 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
3329 }
3330 }
Amir Vadai564c2742012-04-04 21:33:26 +00003331#endif
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003332
Yan Burmanc07cb4b2013-02-07 02:25:25 +00003333 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
3334 INIT_HLIST_HEAD(&priv->mac_hash[i]);
Yan Burman16a10ff2013-02-07 02:25:22 +00003335
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003336 /* Query for default mac and max mtu */
3337 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
Yan Burman6bbb6d92013-02-07 02:25:20 +00003338
Shani Michaelif8c64552014-11-09 13:51:53 +02003339 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
3340 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
3341 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
3342
Yan Burman6bbb6d92013-02-07 02:25:20 +00003343 /* Set default MAC */
3344 dev->addr_len = ETH_ALEN;
3345 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3346 if (!is_valid_ether_addr(dev->dev_addr)) {
Jack Morgenstein2b3ddf22015-10-14 17:43:48 +03003347		en_err(priv, "Port: %d, invalid MAC burned: %pM, quitting\n",
3348 priv->port, dev->dev_addr);
3349 err = -EINVAL;
3350 goto out;
3351 } else if (mlx4_is_slave(priv->mdev->dev) &&
3352 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
3353 /* Random MAC was assigned in mlx4_slave_cap
3354 * in mlx4_core module
3355 */
3356 dev->addr_assign_type |= NET_ADDR_RANDOM;
3357 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003358 }
3359
Noa Osherovich2695bab2014-07-08 11:25:24 +03003360 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
Yan Burman6bbb6d92013-02-07 02:25:20 +00003361
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003362 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
3363 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
3364 err = mlx4_en_alloc_resources(priv);
3365 if (err)
3366 goto out;
3367
Amir Vadaiec693d42013-04-23 06:06:49 +00003368 /* Initialize time stamping config */
3369 priv->hwtstamp_config.flags = 0;
3370 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3371 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3372
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003373 /* Allocate page for receive rings */
3374 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
Haggai Abramovsky73898db2016-05-04 14:50:15 +03003375 MLX4_EN_PAGE_SIZE);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003376 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00003377 en_err(priv, "Failed to allocate page for rx qps\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003378 goto out;
3379 }
3380 priv->allocated = 1;
3381
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003382 /*
3383 * Initialize netdev entry points
3384 */
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00003385 if (mlx4_is_master(priv->mdev->dev))
3386 dev->netdev_ops = &mlx4_netdev_ops_master;
3387 else
3388 dev->netdev_ops = &mlx4_netdev_ops;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003389 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003390 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
Ben Hutchings1eb63a22010-09-27 08:29:34 +00003391 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
Stephen Hemminger3addc562008-11-21 17:30:58 -08003392
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00003393 dev->ethtool_ops = &mlx4_en_ethtool_ops;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003394
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003395 /*
3396 * Set driver features
3397 */
Michał Mirosławc8c64cf2011-04-15 04:50:49 +00003398 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3399 if (mdev->LSO_support)
3400 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3401
3402 dev->vlan_features = dev->hw_features;
3403
Yevgeny Petrilinad861072011-10-18 01:51:24 +00003404 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
Michał Mirosławc8c64cf2011-04-15 04:50:49 +00003405 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
Patrick McHardyf6469682013-04-19 02:04:27 +00003406 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3407 NETIF_F_HW_VLAN_CTAG_FILTER;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003408 dev->hw_features |= NETIF_F_LOOPBACK |
3409 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003410
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003411 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3412 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
3413 NETIF_F_HW_VLAN_STAG_FILTER;
3414 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
3415 }
3416
3417 if (mlx4_is_slave(mdev->dev)) {
Moshe Shemesh0815fe32016-09-22 12:11:14 +03003418 bool vlan_offload_disabled;
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003419 int phv;
3420
3421 err = get_phv_bit(mdev->dev, port, &phv);
3422 if (!err && phv) {
3423 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3424 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
3425 }
Moshe Shemesh0815fe32016-09-22 12:11:14 +03003426 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
3427 &vlan_offload_disabled);
3428 if (!err && vlan_offload_disabled) {
3429 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3430 NETIF_F_HW_VLAN_CTAG_RX |
3431 NETIF_F_HW_VLAN_STAG_TX |
3432 NETIF_F_HW_VLAN_STAG_RX);
3433 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3434 NETIF_F_HW_VLAN_CTAG_RX |
3435 NETIF_F_HW_VLAN_STAG_TX |
3436 NETIF_F_HW_VLAN_STAG_RX);
3437 }
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003438 } else {
3439 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3440 !(mdev->dev->caps.flags2 &
3441 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
3442 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3443 }
3444
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003445 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3446 dev->hw_features |= NETIF_F_RXFCS;
3447
Muhammad Mahajna78500b82015-04-02 16:31:22 +03003448 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3449 dev->hw_features |= NETIF_F_RXALL;
3450
Amir Vadai1eb8c692012-07-18 22:33:52 +00003451 if (mdev->dev->caps.steering_mode ==
Matan Barak7d077cd2014-12-11 10:58:00 +02003452 MLX4_STEERING_MODE_DEVICE_MANAGED &&
3453 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
Amir Vadai1eb8c692012-07-18 22:33:52 +00003454 dev->hw_features |= NETIF_F_NTUPLE;
3455
Yan Burmancc5387f2013-02-07 02:25:26 +00003456 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3457 dev->priv_flags |= IFF_UNICAST_FLT;
3458
Eyal Perry947cbb02014-12-02 18:12:11 +02003459 /* Setting a default hash function value */
3460 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3461 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3462 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3463 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3464 } else {
3465 en_warn(priv,
3466 "No RSS hash capabilities exposed, using Toeplitz\n");
3467 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3468 }
3469
Eugenia Emantayev925ab1a2016-02-17 17:24:27 +02003470 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Alexander Duyck3c9346b2016-05-02 09:38:30 -07003471 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3472 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3473 NETIF_F_GSO_PARTIAL;
3474 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3475 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3476 NETIF_F_GSO_PARTIAL;
3477 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
Eugenia Emantayev925ab1a2016-02-17 17:24:27 +02003478 }
3479
Jarod Wilsonb80f71f2016-10-17 15:54:07 -04003480 /* MTU range: 46 - hw-specific max */
3481 dev->min_mtu = MLX4_EN_MIN_MTU;
3482 dev->max_mtu = priv->max_mtu;
3483
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003484 mdev->pndev[port] = dev;
Moni Shoua5da03542015-02-03 16:48:34 +02003485 mdev->upper[port] = NULL;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003486
3487 netif_carrier_off(dev);
Eugenia Emantayev4801ae72013-06-25 12:09:31 +03003488 mlx4_en_set_default_moderation(priv);
3489
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003490 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00003491 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3492
Yan Burman79aeacc2013-02-07 02:25:19 +00003493 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3494
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003495 /* Configure port */
Yevgeny Petrilin5c8e9042012-06-25 00:24:11 +00003496 mlx4_en_calc_rx_buf(dev);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003497 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
Yevgeny Petrilin5c8e9042012-06-25 00:24:11 +00003498 priv->rx_skb_size + ETH_FCS_LEN,
3499 prof->tx_pause, prof->tx_ppp,
3500 prof->rx_pause, prof->rx_ppp);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003501 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07003502 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3503 priv->port, err);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003504 goto out;
3505 }
3506
Or Gerlitz837052d2013-12-23 16:09:44 +02003507 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02003508 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
Or Gerlitz837052d2013-12-23 16:09:44 +02003509 if (err) {
3510 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3511 err);
3512 goto out;
3513 }
3514 }
3515
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003516 /* Init port */
3517 en_warn(priv, "Initializing port\n");
3518 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3519 if (err) {
3520 en_err(priv, "Failed Initializing port\n");
3521 goto out;
3522 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003523 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
Amir Vadaidc8142e2013-04-25 05:22:24 +00003524
Eugenia Emantayev90683062015-12-17 15:35:38 +02003525 /* Initialize time stamp mechanism */
Amir Vadaidc8142e2013-04-25 05:22:24 +00003526 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
Eugenia Emantayev90683062015-12-17 15:35:38 +02003527 mlx4_en_init_timestamp(mdev);
3528
Eugenia Emantayevfc9f5ea2015-12-17 15:35:37 +02003529 queue_delayed_work(mdev->workqueue, &priv->service_task,
3530 SERVICE_TASK_DELAY);
Amir Vadaidc8142e2013-04-25 05:22:24 +00003531
Matan Barak0b131562015-03-30 17:45:25 +03003532 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3533 mdev->profile.prof[priv->port].rx_ppp,
3534 mdev->profile.prof[priv->port].rx_pause,
3535 mdev->profile.prof[priv->port].tx_ppp,
3536 mdev->profile.prof[priv->port].tx_pause);
Eran Ben Elisha39de9612015-03-18 16:51:38 +02003537
Ido Shamaye5eda892015-03-24 15:18:38 +02003538 err = register_netdev(dev);
3539 if (err) {
3540 en_err(priv, "Netdev registration failed for port %d\n", port);
3541 goto out;
3542 }
3543
3544 priv->registered = 1;
Jiri Pirko09d4d082016-02-26 17:32:24 +01003545 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
3546 dev);
Ido Shamaye5eda892015-03-24 15:18:38 +02003547
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003548 return 0;
3549
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003550err_free_tx:
3551 while (t--) {
3552 kfree(priv->tx_ring[t]);
3553 kfree(priv->tx_cq[t]);
3554 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003555out:
3556 mlx4_en_destroy_netdev(dev);
3557 return err;
3558}
3559
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003560int mlx4_en_reset_config(struct net_device *dev,
3561 struct hwtstamp_config ts_config,
3562 netdev_features_t features)
3563{
3564 struct mlx4_en_priv *priv = netdev_priv(dev);
3565 struct mlx4_en_dev *mdev = priv->mdev;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003566 struct mlx4_en_port_profile new_prof;
3567 struct mlx4_en_priv *tmp;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003568 int port_up = 0;
3569 int err = 0;
3570
3571 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3572 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003573 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3574 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003575 return 0; /* Nothing to change */
3576
3577 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3578 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3579 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3580 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3581 return -EINVAL;
3582 }
3583
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003584 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3585 if (!tmp)
3586 return -ENOMEM;
3587
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003588 mutex_lock(&mdev->state_lock);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003589
3590 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
3591 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
3592
Martin KaFai Lau770f8222017-01-31 22:35:33 -08003593 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003594 if (err)
3595 goto out;
3596
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003597 if (priv->port_up) {
3598 port_up = 1;
3599 mlx4_en_stop_port(dev, 1);
3600 }
3601
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003602 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003603 ts_config.rx_filter,
3604 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003605
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003606 mlx4_en_safe_replace_resources(priv, tmp);
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003607
3608 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3609 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3610 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3611 else
3612 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3613 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3614 /* RX time-stamping is OFF, update the RX vlan offload
3615 * to the latest wanted state
3616 */
3617 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3618 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3619 else
3620 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3621 }
3622
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003623 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3624 if (features & NETIF_F_RXFCS)
3625 dev->features |= NETIF_F_RXFCS;
3626 else
3627 dev->features &= ~NETIF_F_RXFCS;
3628 }
3629
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003630	/* RX vlan offload and RX time-stamping can't coexist!
3631	 * Regardless of the caller's choice,
3632	 * turn off RX vlan offload when RX time-stamping is ON.
3633 */
3634 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3635 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3636 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3637 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3638 }
3639
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003640 if (port_up) {
3641 err = mlx4_en_start_port(dev);
3642 if (err)
3643 en_err(priv, "Failed starting port\n");
3644 }
3645
3646out:
3647 mutex_unlock(&mdev->state_lock);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003648 kfree(tmp);
3649 if (!err)
3650 netdev_features_change(dev);
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003651 return err;
3652}