/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

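/* The largest MTU an XDP program can handle: one page per frame, less the
 * Ethernet header, two stacked VLAN tags and the headroom XDP programs may
 * prepend to the packet.
 */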
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
				   XDP_PACKET_HEADROOM))

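/* Map @up user priorities onto the TX queues: each priority gets a
 * contiguous block of num_tx_rings_p_up rings. up == 0 removes the
 * mapping; any other value must equal MLX4_EN_NUM_UP.
 */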
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle,
			      u32 chain_index, __be16 proto,
			      struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx4_en_setup_tc(dev, tc->mqprio->num_tc);
}

#ifdef CONFIG_RFS_ACCEL

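/* One accelerated-RFS steering rule: a TCP/UDP 4-tuple directed to the RX
 * queue on whose CPU the consuming application runs. Entries are linked on
 * priv->filters (for expiry) and hashed in priv->filter_hash (for lookup).
 */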
struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
};

/* Must not acquire state_lock, as its corresponding work_sync
 * is done under it.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

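/* Fold the 4-tuple into one word and hash it into one of the
 * 1 << MLX4_EN_FILTER_HASH_SHIFT buckets of priv->filter_hash.
 */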
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

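/* ndo_rx_flow_steer() callback, invoked by the RFS core when a flow would
 * be better served by RX queue @rxq_index. Only non-fragmented IPv4
 * TCP/UDP can be steered (fragments lack the port numbers). Retargets an
 * existing filter or allocates a new one, and defers the firmware flow
 * attach to mlx4_en_filter_work().
 */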
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

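/* Reap up to MLX4_EN_FILTER_EXPIRY_QUOTA filters whose flows the RFS core
 * no longer tracks, then rotate the list so the next scan resumes after
 * the last surviving filter.
 */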
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

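/* Expand a MAC address held in the low 48 bits of @src_mac into a byte
 * array, most significant byte first, zeroing the two padding bytes.
 */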
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}


static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}


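/* Attach a unicast steering rule directing @mac to QP @*qpn, according to
 * the device's steering mode: B0 uses a GID built from port and MAC,
 * device-managed flow steering uses an L2 spec; A0 has no per-MAC rules
 * and is rejected here.
 */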
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

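/* Register the port's primary MAC and resolve the base RX QP number: with
 * A0 steering the QP is implied by the MAC table index, otherwise a
 * dedicated QP range is reserved from the device.
 */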
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

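/* Replace the port's primary MAC in place: release the steering rule and
 * MAC registration of @prev_mac, re-point its mac_hash entry at @new_mac
 * and re-register it, refreshing the VXLAN tunnel steering rule as well.
 * With A0 steering only the MAC table entry is replaced.
 */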
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst,
	 * marking them as needing to be added
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

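/* Enter promiscuous mode: install a catch-all rule for the steering mode
 * in use and unconditionally disable the port multicast filter. The flag
 * check keeps this idempotent across repeated rx_mode work invocations.
 */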
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

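/* Sync the device unicast filter with the netdev uc list: drop hash
 * entries that left the list, then add new ones. On any failure (out of
 * memory, MAC table or steering space exhausted) force promiscuous mode
 * rather than silently dropping addresses.
 */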
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		cq = priv->tx_cq[TX][i];
		napi_schedule(&cq->napi);
	}
}
#endif

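/* Install the unicast and tunnel steering rules that direct the port's
 * own MAC to the RSS base QP, and seed the mac_hash with it; undone by
 * mlx4_en_delete_rss_steer_rules() below.
 */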
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];

		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, tx_ring->qpn, tx_ring->sp_cqn,
			tx_ring->cons, tx_ring->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i, t;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			cq = priv->tx_cq[t][i];
			cq->moder_cnt = priv->tx_frames;
			cq->moder_time = priv->tx_usecs;
		}
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

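/* Adaptive RX interrupt moderation, run from the stats task once per
 * sample interval: for each ring, estimate the packet rate and linearly
 * interpolate the CQ moderation time between rx_usecs_low (at
 * pkt_rate_low) and rx_usecs_high (at pkt_rate_high). Low-rate or
 * small-packet traffic keeps the low setting to preserve latency.
 */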
1387static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1388{
1389 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
Eric Dumazetf5a57722017-02-16 15:23:27 -08001390 u32 pkt_rate_high, pkt_rate_low;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001391 struct mlx4_en_cq *cq;
1392 unsigned long packets;
1393 unsigned long rate;
1394 unsigned long avg_pkt_size;
1395 unsigned long rx_packets;
1396 unsigned long rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001397 unsigned long rx_pkt_diff;
1398 int moder_time;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001399 int ring, err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001400
1401 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1402 return;
1403
Eric Dumazetf5a57722017-02-16 15:23:27 -08001404 pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
1405 pkt_rate_high = READ_ONCE(priv->pkt_rate_high);
1406
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001407 for (ring = 0; ring < priv->rx_ring_num; ring++) {
Eric Dumazetb9972d22016-11-23 09:46:52 -08001408 rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
1409 rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001410
Eric Dumazetf5a57722017-02-16 15:23:27 -08001411 rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001412 packets = rx_pkt_diff;
1413 rate = packets * HZ / period;
Eric Dumazetf5a57722017-02-16 15:23:27 -08001414 avg_pkt_size = packets ? (rx_bytes -
1415 priv->last_moder_bytes[ring]) / packets : 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001416
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001417		/* Apply auto-moderation only when the packet rate
1418					 * exceeds a rate at which it matters */
1419 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1420 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
Eric Dumazetf5a57722017-02-16 15:23:27 -08001421 if (rate <= pkt_rate_low)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001422 moder_time = priv->rx_usecs_low;
Eric Dumazetf5a57722017-02-16 15:23:27 -08001423 else if (rate >= pkt_rate_high)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001424 moder_time = priv->rx_usecs_high;
1425 else
Eric Dumazetf5a57722017-02-16 15:23:27 -08001426 moder_time = (rate - pkt_rate_low) *
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001427 (priv->rx_usecs_high - priv->rx_usecs_low) /
Eric Dumazetf5a57722017-02-16 15:23:27 -08001428 (pkt_rate_high - pkt_rate_low) +
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001429 priv->rx_usecs_low;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001430 } else {
1431 moder_time = priv->rx_usecs_low;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001432 }
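		/* Worked example with illustrative (not driver-default)
		 * numbers: pkt_rate_low = 100k pps, pkt_rate_high = 300k pps,
		 * rx_usecs_low = 16, rx_usecs_high = 128. A measured rate of
		 * 200k pps interpolates to
		 *   (200k - 100k) * (128 - 16) / (300k - 100k) + 16 = 72
		 * usecs of moderation time.
		 */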
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001433
Eric Dumazetf5a57722017-02-16 15:23:27 -08001434 cq = priv->rx_cq[ring];
1435 if (moder_time != priv->last_moder_time[ring] ||
1436 cq->moder_cnt != priv->rx_frames) {
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001437 priv->last_moder_time[ring] = moder_time;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001438 cq->moder_time = moder_time;
Sagi Grimberga1c66932013-06-04 05:13:26 +00001439 cq->moder_cnt = priv->rx_frames;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001440 err = mlx4_en_set_cq_moder(priv, cq);
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001441 if (err)
Yan Burman48e551f2013-02-07 02:25:21 +00001442 en_err(priv, "Failed modifying moderation for cq:%d\n",
1443 ring);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001444 }
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001445 priv->last_moder_packets[ring] = rx_packets;
1446 priv->last_moder_bytes[ring] = rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001447 }
1448
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001449 priv->last_moder_jiffies = jiffies;
1450}
1451
1452static void mlx4_en_do_get_stats(struct work_struct *work)
1453{
Jean Delvarebf6aede2009-04-02 16:56:54 -07001454 struct delayed_work *delay = to_delayed_work(work);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001455 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1456 stats_task);
1457 struct mlx4_en_dev *mdev = priv->mdev;
1458 int err;
1459
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001460 mutex_lock(&mdev->state_lock);
1461 if (mdev->device_up) {
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001462 if (priv->port_up) {
1463 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1464 if (err)
1465 en_dbg(HW, priv, "Could not update stats\n");
Eugenia Emantayev2d518372013-01-24 01:54:14 +00001466
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001467 mlx4_en_auto_moderation(priv);
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001468 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001469
1470 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1471 }
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001472 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
Noa Osherovich2695bab2014-07-08 11:25:24 +03001473 mlx4_en_do_set_mac(priv, priv->current_mac);
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001474 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1475 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001476 mutex_unlock(&mdev->state_lock);
1477}
1478
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001479/* mlx4_en_service_task - Run service task for tasks that need to be done
1480 * periodically
1481 */
1482static void mlx4_en_service_task(struct work_struct *work)
1483{
1484 struct delayed_work *delay = to_delayed_work(work);
1485 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1486 service_task);
1487 struct mlx4_en_dev *mdev = priv->mdev;
1488
1489 mutex_lock(&mdev->state_lock);
1490 if (mdev->device_up) {
Amir Vadaidc8142e2013-04-25 05:22:24 +00001491 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1492 mlx4_en_ptp_overflow_check(mdev);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001493
Ido Shamay07841f92015-04-30 17:32:46 +03001494 mlx4_en_recover_from_oom(priv);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001495 queue_delayed_work(mdev->workqueue, &priv->service_task,
1496 SERVICE_TASK_DELAY);
1497 }
1498 mutex_unlock(&mdev->state_lock);
1499}
1500
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001501static void mlx4_en_linkstate(struct work_struct *work)
1502{
1503 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1504 linkstate_task);
1505 struct mlx4_en_dev *mdev = priv->mdev;
1506 int linkstate = priv->link_state;
1507
1508 mutex_lock(&mdev->state_lock);
1509	/* If the observable port state changed, set the carrier state and
1510	 * report to the system log */
1511 if (priv->last_link_state != linkstate) {
1512 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001513 en_info(priv, "Link Down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001514 netif_carrier_off(priv->dev);
1515 } else {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001516 en_info(priv, "Link Up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001517 netif_carrier_on(priv->dev);
1518 }
1519 }
1520 priv->last_link_state = linkstate;
1521 mutex_unlock(&mdev->state_lock);
1522}
1523
Yuval Atias9e311e72014-06-09 10:24:39 +03001524static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1525{
1526 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1527 int numa_node = priv->mdev->dev->numa_node;
Yuval Atias9e311e72014-06-09 10:24:39 +03001528
1529 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1530 return -ENOMEM;
1531
Rusty Russellf36963c2015-05-09 03:14:13 +09301532 cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
1533 ring->affinity_mask);
1534 return 0;
Yuval Atias9e311e72014-06-09 10:24:39 +03001535}
1536
1537static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1538{
1539 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1540}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001541
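/* XDP_TX rings are paired one-to-one with rx rings: instead of freeing a
 * completed descriptor's page, mlx4_en_recycle_tx_desc() hands it back to the
 * paired rx ring for reuse.
 */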
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001542static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
1543 int tx_ring_idx)
1544{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001545 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
1546 int rr_index = tx_ring_idx;
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001547
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001548 tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
1549 tx_ring->recycle_ring = priv->rx_ring[rr_index];
1550 en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
1551 TX_XDP, tx_ring_idx, rr_index);
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07001552}
1553
Yevgeny Petrilin18cc42a2008-12-29 18:39:20 -08001554int mlx4_en_start_port(struct net_device *dev)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001555{
1556 struct mlx4_en_priv *priv = netdev_priv(dev);
1557 struct mlx4_en_dev *mdev = priv->mdev;
1558 struct mlx4_en_cq *cq;
1559 struct mlx4_en_tx_ring *tx_ring;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001560 int rx_index = 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001561 int err = 0;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001562 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001563 int j;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001564 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001565
1566 if (priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001567 en_dbg(DRV, priv, "start port called while port already up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001568 return 0;
1569 }
1570
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001571 INIT_LIST_HEAD(&priv->mc_list);
1572 INIT_LIST_HEAD(&priv->curr_list);
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001573 INIT_LIST_HEAD(&priv->ethtool_list);
1574 memset(&priv->ethtool_rules[0], 0,
1575 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001576
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001577 /* Calculate Rx buf size */
1578 dev->mtu = min(dev->mtu, priv->max_mtu);
1579 mlx4_en_calc_rx_buf(dev);
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001580 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001581
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001582 /* Configure rx cq's and rings */
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001583 err = mlx4_en_activate_rx_rings(priv);
1584 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001585 en_err(priv, "Failed to activate RX rings\n");
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001586 return err;
1587 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001588 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001589 cq = priv->rx_cq[i];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001590
Yuval Atias9e311e72014-06-09 10:24:39 +03001591 err = mlx4_en_init_affinity_hint(priv, i);
1592 if (err) {
1593 en_err(priv, "Failed preparing IRQ affinity hint\n");
1594 goto cq_err;
1595 }
1596
Alexander Guller76532d02011-10-09 05:26:31 +00001597 err = mlx4_en_activate_cq(priv, cq, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001598 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001599 en_err(priv, "Failed activating Rx CQ\n");
Yuval Atias9e311e72014-06-09 10:24:39 +03001600 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilina4233302009-04-26 20:41:34 +00001601 goto cq_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001602 }
Ido Shamayc3f25112014-12-16 13:28:54 +02001603
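		/* Stamp every CQE with the HW ownership bit so that stale
		 * ring contents are not mistaken for valid completions
		 * before the hardware has actually written them.
		 */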
1604 for (j = 0; j < cq->size; j++) {
1605 struct mlx4_cqe *cqe = NULL;
1606
1607 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1608 priv->cqe_factor;
1609 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1610 }
1611
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001612 err = mlx4_en_set_cq_moder(priv, cq);
1613 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001614 en_err(priv, "Failed setting cq moderation parameters\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001615 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001616 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001617 goto cq_err;
1618 }
1619 mlx4_en_arm_cq(priv, cq);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001620 priv->rx_ring[i]->cqn = cq->mcq.cqn;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001621 ++rx_index;
1622 }
1623
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001624 /* Set qp number */
1625 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
Yan Burman16a10ff2013-02-07 02:25:22 +00001626 err = mlx4_en_get_qp(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001627 if (err) {
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001628 en_err(priv, "Failed getting eth qp\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001629 goto cq_err;
1630 }
1631 mdev->mac_removed[priv->port] = 0;
1632
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03001633 priv->counter_index =
1634 mlx4_get_default_counter_index(mdev->dev, priv->port);
1635
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001636 err = mlx4_en_config_rss_steer(priv);
1637 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001638 en_err(priv, "Failed configuring rss steering\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001639 goto mac_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001640 }
1641
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001642 err = mlx4_en_create_drop_qp(priv);
1643 if (err)
1644 goto rss_err;
1645
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001646 /* Configure tx cq's and rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001647 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
Tariq Toukaneb9def62016-12-22 14:32:58 +02001648 u8 num_tx_rings_p_up = t == TX ?
1649 priv->num_tx_rings_p_up : priv->tx_ring_num[t];
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001650
1651 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1652 /* Configure cq */
1653 cq = priv->tx_cq[t][i];
1654 err = mlx4_en_activate_cq(priv, cq, i);
1655 if (err) {
1656 en_err(priv, "Failed allocating Tx CQ\n");
1657 goto tx_err;
1658 }
1659 err = mlx4_en_set_cq_moder(priv, cq);
1660 if (err) {
1661 en_err(priv, "Failed setting cq moderation parameters\n");
1662 mlx4_en_deactivate_cq(priv, cq);
1663 goto tx_err;
1664 }
1665 en_dbg(DRV, priv,
1666 "Resetting index of collapsed CQ:%d to -1\n", i);
1667 cq->buf->wqe_index = cpu_to_be16(0xffff);
1668
1669 /* Configure ring */
1670 tx_ring = priv->tx_ring[t][i];
1671 err = mlx4_en_activate_tx_ring(priv, tx_ring,
1672 cq->mcq.cqn,
1673 i / num_tx_rings_p_up);
1674 if (err) {
1675 en_err(priv, "Failed allocating Tx ring\n");
1676 mlx4_en_deactivate_cq(priv, cq);
1677 goto tx_err;
1678 }
1679 if (t != TX_XDP) {
1680 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1681 tx_ring->recycle_ring = NULL;
Tariq Toukan6c785112017-06-15 14:35:37 +03001682
1683 /* Arm CQ for TX completions */
1684 mlx4_en_arm_cq(priv, cq);
1685
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001686 } else {
1687 mlx4_en_init_recycle_ring(priv, i);
Tariq Toukan6c785112017-06-15 14:35:37 +03001688 /* XDP TX CQ should never be armed */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001689 }
1690
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001691 /* Set initial ownership of all Tx TXBBs to SW (1) */
1692 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1693 *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001694 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001695 }
1696
1697 /* Configure port */
1698 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1699 priv->rx_skb_size + ETH_FCS_LEN,
Yevgeny Petrilind53b93f2008-11-05 04:48:36 +00001700 priv->prof->tx_pause,
1701 priv->prof->tx_ppp,
1702 priv->prof->rx_pause,
1703 priv->prof->rx_ppp);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001704 if (err) {
Yan Burman48e551f2013-02-07 02:25:21 +00001705 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1706 priv->port, err);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001707 goto tx_err;
1708 }
Shaker Daibes40fb4fc2017-01-29 18:56:18 +02001709
1710 err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
1711 if (err) {
1712 en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
1713 dev->mtu, priv->port, err);
1714 goto tx_err;
1715 }
1716
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001717 /* Set default qp number */
1718 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1719 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001720 en_err(priv, "Failed setting default qp numbers\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001721 goto tx_err;
1722 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001723
Or Gerlitz837052d2013-12-23 16:09:44 +02001724 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02001725 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
Or Gerlitz837052d2013-12-23 16:09:44 +02001726 if (err) {
1727 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1728 err);
1729 goto tx_err;
1730 }
1731 }
1732
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001733 /* Init port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001734 en_dbg(HW, priv, "Initializing port\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001735 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1736 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001737 en_err(priv, "Failed Initializing port\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001738 goto tx_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001739 }
1740
Ido Shamayba4b87ae2015-10-08 17:14:01 +03001741 /* Set Unicast and VXLAN steering rules */
1742 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
1743 mlx4_en_set_rss_steer_rules(priv))
1744 mlx4_warn(mdev, "Failed setting steering rules\n");
1745
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001746	/* Attach rx QP to broadcast address */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001747 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001748 mc_list[5] = priv->port; /* needed for B0 steering support */
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001749 if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001750 priv->port, 0, MLX4_PROT_ETH,
1751 &priv->broadcast_id))
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001752 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1753
Herbert Xub5845f92011-03-27 01:01:26 +00001754 /* Must redo promiscuous mode setup. */
1755 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1756
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001757 /* Schedule multicast task to populate multicast list */
Yan Burman0eb74fd2013-02-07 02:25:23 +00001758 queue_work(mdev->workqueue, &priv->rx_mode_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001759
Or Gerlitz9737c6a2014-11-18 17:51:27 +02001760 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Alexander Duycka8312742016-06-16 12:22:30 -07001761 udp_tunnel_get_rx_info(dev);
1762
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001763 priv->port_up = true;
Erez Shitrit8d59de82016-10-27 16:27:17 +03001764
1765	/* Process any pending completions, to prevent
1766	 * the queues from freezing if they are full
1767 */
Eric Dumazet8cf699e2017-01-13 08:39:24 -08001768 for (i = 0; i < priv->rx_ring_num; i++) {
1769 local_bh_disable();
Erez Shitrit8d59de82016-10-27 16:27:17 +03001770 napi_schedule(&priv->rx_cq[i]->napi);
Eric Dumazet8cf699e2017-01-13 08:39:24 -08001771 local_bh_enable();
1772 }
Erez Shitrit8d59de82016-10-27 16:27:17 +03001773
Yevgeny Petrilina11faac2009-06-20 22:15:46 +00001774 netif_tx_start_all_queues(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001775 netif_device_attach(dev);
1776
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001777 return 0;
1778
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001779tx_err:
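	/* If we arrived here after both allocation loops completed
	 * (t == MLX4_EN_NUM_TX_TYPES), rewind to the last ring type and its
	 * full ring count; otherwise t/i already point at the failing
	 * cq/ring pair, and only the entries before it were activated.
	 */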
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001780 if (t == MLX4_EN_NUM_TX_TYPES) {
1781 t--;
1782 i = priv->tx_ring_num[t];
1783 }
1784 while (t >= 0) {
1785 while (i--) {
1786 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1787 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1788 }
1789 if (!t--)
1790 break;
1791 i = priv->tx_ring_num[t];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001792 }
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001793 mlx4_en_destroy_drop_qp(priv);
1794rss_err:
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001795 mlx4_en_release_rss_steer(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001796mac_err:
Yan Burman16a10ff2013-02-07 02:25:22 +00001797 mlx4_en_put_qp(priv);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001798cq_err:
Yuval Atias9e311e72014-06-09 10:24:39 +03001799 while (rx_index--) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001800 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
Benjamin Poirierf94813f2015-04-29 15:59:35 -07001801 mlx4_en_free_affinity_hint(priv, rx_index);
Yuval Atias9e311e72014-06-09 10:24:39 +03001802 }
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001803 for (i = 0; i < priv->rx_ring_num; i++)
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001804 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001805
1806 return err; /* need to close devices */
1807}
1808
1809
Amir Vadai3484aac2013-01-30 23:07:11 +00001810void mlx4_en_stop_port(struct net_device *dev, int detach)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001811{
1812 struct mlx4_en_priv *priv = netdev_priv(dev);
1813 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001814 struct mlx4_en_mc_list *mclist, *tmp;
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001815 struct ethtool_flow_id *flow, *tmp_flow;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001816 int i, t;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001817 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001818
1819 if (!priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001820 en_dbg(DRV, priv, "stop port called while port already down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001821 return;
1822 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001823
Eugenia Emantayev0cc5c8b2013-06-25 12:09:33 +03001824	/* close port */
1825 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1826
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001827 /* Synchronize with tx routine */
1828 netif_tx_lock_bh(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001829 if (detach)
1830 netif_device_detach(dev);
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001831 netif_tx_stop_all_queues(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001832 netif_tx_unlock_bh(dev);
1833
Amir Vadai3484aac2013-01-30 23:07:11 +00001834 netif_tx_disable(dev);
1835
Eric Dumazet7f7bf162016-12-01 05:02:06 -08001836 spin_lock_bh(&priv->stats_lock);
1837 mlx4_en_fold_software_stats(dev);
Yevgeny Petrilin7c287382010-08-24 03:45:45 +00001838 /* Set port as not active */
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001839 priv->port_up = false;
Eric Dumazet7f7bf162016-12-01 05:02:06 -08001840 spin_unlock_bh(&priv->stats_lock);
1841
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03001842 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001843
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001844	/* Promiscuous mode */
1845 if (mdev->dev->caps.steering_mode ==
1846 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1847 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1848 MLX4_EN_FLAG_MC_PROMISC);
1849 mlx4_flow_steer_promisc_remove(mdev->dev,
1850 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001851 MLX4_FS_ALL_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001852 mlx4_flow_steer_promisc_remove(mdev->dev,
1853 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001854 MLX4_FS_MC_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001855 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1856 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1857
1858		/* Disable promiscuous mode */
1859 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1860 priv->port);
1861
1862 /* Disable Multicast promisc */
1863 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1864 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1865 priv->port);
1866 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1867 }
1868 }
1869
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001870 /* Detach All multicasts */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001871 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001872 mc_list[5] = priv->port; /* needed for B0 steering support */
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001873 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001874 MLX4_PROT_ETH, priv->broadcast_id);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001875 list_for_each_entry(mclist, &priv->curr_list, list) {
1876 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001877 mc_list[5] = priv->port;
Saeed Mahameed4931c6e2017-06-15 14:35:32 +03001878 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001879 mc_list, MLX4_PROT_ETH, mclist->reg_id);
Or Gerlitzde123262014-03-13 14:52:15 +02001880 if (mclist->tunnel_reg_id)
1881 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001882 }
1883 mlx4_en_clear_list(dev);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001884 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1885 list_del(&mclist->list);
1886 kfree(mclist);
1887 }
1888
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001889 /* Flush multicast filter */
1890 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1891
Hadar Hen Zion6efb5fa2013-03-21 05:55:53 +00001892	/* Remove flow steering rules for the port */
1893 if (mdev->dev->caps.steering_mode ==
1894 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1895 ASSERT_RTNL();
1896 list_for_each_entry_safe(flow, tmp_flow,
1897 &priv->ethtool_list, list) {
1898 mlx4_flow_detach(mdev->dev, flow->id);
1899 list_del(&flow->list);
1900 }
1901 }
1902
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001903 mlx4_en_destroy_drop_qp(priv);
1904
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001905 /* Free TX Rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001906 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
1907 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1908 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1909 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1910 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001911 }
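	/* Brief settle time so outstanding Tx completions can drain before
	 * the rings' buffers are freed below.
	 */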
1912 msleep(10);
1913
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001914 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
1915 for (i = 0; i < priv->tx_ring_num[t]; i++)
1916 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001917
Ido Shamayba4b87ae2015-10-08 17:14:01 +03001918 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1919 mlx4_en_delete_rss_steer_rules(priv);
1920
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001921 /* Free RSS qps */
1922 mlx4_en_release_rss_steer(priv);
1923
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001924	/* Unregister MAC address for the port */
Yan Burman16a10ff2013-02-07 02:25:22 +00001925 mlx4_en_put_qp(priv);
Or Gerlitz5930e8d2013-10-15 16:55:22 +02001926 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
Matan Barak955154f2013-01-30 23:07:10 +00001927 mdev->mac_removed[priv->port] = 1;
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001928
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001929 /* Free RX Rings */
1930 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001931 struct mlx4_en_cq *cq = priv->rx_cq[i];
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001932
Ido Shamayf4a36752014-10-27 11:37:45 +02001933 napi_synchronize(&cq->napi);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001934 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001935 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001936
1937 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001938 }
1939}
1940
1941static void mlx4_en_restart(struct work_struct *work)
1942{
1943 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1944 watchdog_task);
1945 struct mlx4_en_dev *mdev = priv->mdev;
1946 struct net_device *dev = priv->dev;
1947
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001948 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001949
Hannes Frederic Sowa0c5c3252016-04-18 21:19:44 +02001950 rtnl_lock();
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001951 mutex_lock(&mdev->state_lock);
1952 if (priv->port_up) {
Amir Vadai3484aac2013-01-30 23:07:11 +00001953 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001954 if (mlx4_en_start_port(dev))
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001955 en_err(priv, "Failed restarting port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001956 }
1957 mutex_unlock(&mdev->state_lock);
Hannes Frederic Sowa0c5c3252016-04-18 21:19:44 +02001958 rtnl_unlock();
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001959}
1960
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001961static void mlx4_en_clear_stats(struct net_device *dev)
1962{
1963 struct mlx4_en_priv *priv = netdev_priv(dev);
1964 struct mlx4_en_dev *mdev = priv->mdev;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001965 struct mlx4_en_tx_ring **tx_ring;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001966 int i;
1967
Tariq Toukaneb4b6782016-10-27 16:27:22 +03001968 if (!mlx4_is_slave(mdev->dev))
1969 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1970 en_dbg(HW, priv, "Failed dumping statistics\n");
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001971
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001972 memset(&priv->pstats, 0, sizeof(priv->pstats));
1973 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1974 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
Matan Barak0b131562015-03-30 17:45:25 +03001975 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
1976 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
1977 memset(&priv->rx_priority_flowstats, 0,
1978 sizeof(priv->rx_priority_flowstats));
1979 memset(&priv->tx_priority_flowstats, 0,
1980 sizeof(priv->tx_priority_flowstats));
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03001981 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001982
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02001983 tx_ring = priv->tx_ring[TX];
1984 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
1985 tx_ring[i]->bytes = 0;
1986 tx_ring[i]->packets = 0;
1987 tx_ring[i]->tx_csum = 0;
1988 tx_ring[i]->tx_dropped = 0;
1989 tx_ring[i]->queue_stopped = 0;
1990 tx_ring[i]->wake_queue = 0;
1991 tx_ring[i]->tso_packets = 0;
1992 tx_ring[i]->xmit_more = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001993 }
1994 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001995 priv->rx_ring[i]->bytes = 0;
1996 priv->rx_ring[i]->packets = 0;
1997 priv->rx_ring[i]->csum_ok = 0;
1998 priv->rx_ring[i]->csum_none = 0;
Shani Michaelif8c64552014-11-09 13:51:53 +02001999 priv->rx_ring[i]->csum_complete = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002000 }
2001}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002002
2003static int mlx4_en_open(struct net_device *dev)
2004{
2005 struct mlx4_en_priv *priv = netdev_priv(dev);
2006 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002007 int err = 0;
2008
2009 mutex_lock(&mdev->state_lock);
2010
2011 if (!mdev->device_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002012 en_err(priv, "Cannot open - device down/disabled\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002013 err = -EBUSY;
2014 goto out;
2015 }
2016
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00002017 /* Reset HW statistics and SW counters */
2018 mlx4_en_clear_stats(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002019
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002020 err = mlx4_en_start_port(dev);
2021 if (err)
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002022 en_err(priv, "Failed starting port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002023
2024out:
2025 mutex_unlock(&mdev->state_lock);
2026 return err;
2027}
2028
2029
2030static int mlx4_en_close(struct net_device *dev)
2031{
2032 struct mlx4_en_priv *priv = netdev_priv(dev);
2033 struct mlx4_en_dev *mdev = priv->mdev;
2034
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002035 en_dbg(IFDOWN, priv, "Close port called\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002036
2037 mutex_lock(&mdev->state_lock);
2038
Amir Vadai3484aac2013-01-30 23:07:11 +00002039 mlx4_en_stop_port(dev, 0);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002040 netif_carrier_off(dev);
2041
2042 mutex_unlock(&mdev->state_lock);
2043 return 0;
2044}
2045
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002046static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002047{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002048 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002049
Amir Vadai1eb8c692012-07-18 22:33:52 +00002050#ifdef CONFIG_RFS_ACCEL
Amir Vadai1eb8c692012-07-18 22:33:52 +00002051 priv->dev->rx_cpu_rmap = NULL;
2052#endif
2053
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002054 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2055 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2056 if (priv->tx_ring[t] && priv->tx_ring[t][i])
2057 mlx4_en_destroy_tx_ring(priv,
2058 &priv->tx_ring[t][i]);
2059 if (priv->tx_cq[t] && priv->tx_cq[t][i])
2060 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2061 }
Martin KaFai Lauf32b20e82017-01-31 22:35:32 -08002062 kfree(priv->tx_ring[t]);
2063 kfree(priv->tx_cq[t]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002064 }
2065
2066 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002067 if (priv->rx_ring[i])
Thadeu Lima de Souza Cascardo68355f72012-02-06 08:39:49 +00002068 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2069 priv->prof->rx_ring_size, priv->stride);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002070 if (priv->rx_cq[i])
Alexander Gullerfe0af032011-10-09 05:26:46 +00002071 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002072 }
Yevgeny Petrilin044ca2a2012-06-25 00:24:13 +00002073
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002074}
2075
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002076static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002077{
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002078 struct mlx4_en_port_profile *prof = priv->prof;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002079 int i, t;
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002080 int node;
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +00002081
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002082 /* Create tx Rings */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002083 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2084 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2085 node = cpu_to_node(i % num_online_cpus());
2086 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
2087 prof->tx_ring_size, i, t, node))
2088 goto err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002089
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002090 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
2091 prof->tx_ring_size,
2092 TXBB_SIZE, node, i))
2093 goto err;
2094 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002095 }
2096
2097 /* Create rx Rings */
2098 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002099 node = cpu_to_node(i % num_online_cpus());
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002100 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002101 prof->rx_ring_size, i, RX, node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002102 goto err;
2103
2104 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002105 prof->rx_ring_size, priv->stride,
2106 node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002107 goto err;
2108 }
2109
Amir Vadai1eb8c692012-07-18 22:33:52 +00002110#ifdef CONFIG_RFS_ACCEL
Matan Barakc66fa192015-05-31 09:30:16 +03002111 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
Amir Vadai1eb8c692012-07-18 22:33:52 +00002112#endif
2113
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002114 return 0;
2115
2116err:
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002117 en_err(priv, "Failed to allocate NIC resources\n");
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002118 for (i = 0; i < priv->rx_ring_num; i++) {
2119 if (priv->rx_ring[i])
2120 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2121 prof->rx_ring_size,
2122 priv->stride);
2123 if (priv->rx_cq[i])
2124 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2125 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002126 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2127 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2128 if (priv->tx_ring[t][i])
2129 mlx4_en_destroy_tx_ring(priv,
2130 &priv->tx_ring[t][i]);
2131 if (priv->tx_cq[t][i])
2132 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2133 }
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002134 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002135 return -ENOMEM;
2136}
2137
2138
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002139static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
2140 struct mlx4_en_priv *src,
2141 struct mlx4_en_port_profile *prof)
2142{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002143 int t;
2144
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002145 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
2146 sizeof(dst->hwtstamp_config));
2147 dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002148 dst->rx_ring_num = prof->rx_ring_num;
2149 dst->flags = prof->flags;
2150 dst->mdev = src->mdev;
2151 dst->port = src->port;
2152 dst->dev = src->dev;
2153 dst->prof = prof;
2154 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2155 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2156
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002157 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2158 dst->tx_ring_num[t] = prof->tx_ring_num[t];
2159 if (!dst->tx_ring_num[t])
2160 continue;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002161
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002162 dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
2163 MAX_TX_RINGS, GFP_KERNEL);
2164 if (!dst->tx_ring[t])
2165 goto err_free_tx;
2166
2167 dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
2168 MAX_TX_RINGS, GFP_KERNEL);
2169 if (!dst->tx_cq[t]) {
2170 kfree(dst->tx_ring[t]);
2171 goto err_free_tx;
2172 }
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002173 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002174
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002175 return 0;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002176
2177err_free_tx:
2178 while (t--) {
2179 kfree(dst->tx_ring[t]);
2180 kfree(dst->tx_cq[t]);
2181 }
2182 return -ENOMEM;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002183}
2184
2185static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
2186 struct mlx4_en_priv *src)
2187{
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002188 int t;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002189 memcpy(dst->rx_ring, src->rx_ring,
2190 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
2191 memcpy(dst->rx_cq, src->rx_cq,
2192 sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
2193 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
2194 sizeof(dst->hwtstamp_config));
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002195 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2196 dst->tx_ring_num[t] = src->tx_ring_num[t];
2197 dst->tx_ring[t] = src->tx_ring[t];
2198 dst->tx_cq[t] = src->tx_cq[t];
2199 }
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002200 dst->rx_ring_num = src->rx_ring_num;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002201 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
2202}
2203
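/* Build a complete replacement set of rings and cqs into @tmp while the port
 * keeps running on the current ones. A sketch of the expected caller flow:
 * fill @tmp here, stop the port, commit the new set with
 * mlx4_en_safe_replace_resources(), then restart. On any failure the active
 * configuration is left untouched.
 */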
2204int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2205 struct mlx4_en_priv *tmp,
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002206 struct mlx4_en_port_profile *prof,
2207 bool carry_xdp_prog)
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002208{
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002209 struct bpf_prog *xdp_prog;
2210 int i, t;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002211
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002212 mlx4_en_copy_priv(tmp, priv, prof);
2213
2214 if (mlx4_en_alloc_resources(tmp)) {
2215 en_warn(priv,
2216 "%s: Resource allocation failed, using previous configuration\n",
2217 __func__);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002218 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2219 kfree(tmp->tx_ring[t]);
2220 kfree(tmp->tx_cq[t]);
2221 }
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002222 return -ENOMEM;
2223 }
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002224
2225	/* All rx_rings have the same xdp_prog. Pick the first one. */
2226 xdp_prog = rcu_dereference_protected(
2227 priv->rx_ring[0]->xdp_prog,
2228 lockdep_is_held(&priv->mdev->state_lock));
2229
2230 if (xdp_prog && carry_xdp_prog) {
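		/* Take one reference per rx ring that will carry the
		 * program over to the new resource set.
		 */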
2231 xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
2232 if (IS_ERR(xdp_prog)) {
2233 mlx4_en_free_resources(tmp);
2234 return PTR_ERR(xdp_prog);
2235 }
2236 for (i = 0; i < tmp->rx_ring_num; i++)
2237 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
2238 xdp_prog);
2239 }
2240
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03002241 return 0;
2242}
2243
2244void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
2245 struct mlx4_en_priv *tmp)
2246{
2247 mlx4_en_free_resources(priv);
2248 mlx4_en_update_priv(priv, tmp);
2249}
2250
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002251void mlx4_en_destroy_netdev(struct net_device *dev)
2252{
2253 struct mlx4_en_priv *priv = netdev_priv(dev);
2254 struct mlx4_en_dev *mdev = priv->mdev;
2255
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002256 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002257
2258 /* Unregister device - this will close the port if it was up */
Jiri Pirko09d4d082016-02-26 17:32:24 +01002259 if (priv->registered) {
2260 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2261 priv->port));
Tariq Toukanb4353702016-11-27 19:20:51 +02002262 unregister_netdev(dev);
Jiri Pirko09d4d082016-02-26 17:32:24 +01002263 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002264
2265 if (priv->allocated)
2266 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2267
2268 cancel_delayed_work(&priv->stats_task);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00002269 cancel_delayed_work(&priv->service_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002270 /* flush any pending task for this netdev */
2271 flush_workqueue(mdev->workqueue);
2272
Eugenia Emantayev90683062015-12-17 15:35:38 +02002273 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2274 mlx4_en_remove_timestamp(mdev);
2275
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002276 /* Detach the netdev so tasks would not attempt to access it */
2277 mutex_lock(&mdev->state_lock);
2278 mdev->pndev[priv->port] = NULL;
Moni Shoua5da03542015-02-03 16:48:34 +02002279 mdev->upper[priv->port] = NULL;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002280
Eugenia Emantayev30f56e32016-07-18 18:35:11 +03002281#ifdef CONFIG_RFS_ACCEL
2282 mlx4_en_cleanup_filters(priv);
2283#endif
2284
Alexander Gullerfe0af032011-10-09 05:26:46 +00002285 mlx4_en_free_resources(priv);
Tariq Toukanb6e01232016-11-22 16:20:39 +02002286 mutex_unlock(&mdev->state_lock);
Amir Vadai564c2742012-04-04 21:33:26 +00002287
Tariq Toukanb4353702016-11-27 19:20:51 +02002288 free_netdev(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002289}
2290
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002291static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
2292{
2293 struct mlx4_en_priv *priv = netdev_priv(dev);
2294
2295 if (mtu > MLX4_EN_MAX_XDP_MTU) {
2296 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
2297 mtu, MLX4_EN_MAX_XDP_MTU);
2298 return false;
2299 }
2300
2301 return true;
2302}
2303
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002304static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2305{
2306 struct mlx4_en_priv *priv = netdev_priv(dev);
2307 struct mlx4_en_dev *mdev = priv->mdev;
2308 int err = 0;
2309
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002310 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002311 dev->mtu, new_mtu);
2312
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002313 if (priv->tx_ring_num[TX_XDP] &&
2314 !mlx4_en_check_xdp_mtu(dev, new_mtu))
Martin KaFai Lau9f9b74e2017-01-10 09:41:49 -08002315 return -EOPNOTSUPP;
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002316
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002317 dev->mtu = new_mtu;
2318
2319 if (netif_running(dev)) {
2320 mutex_lock(&mdev->state_lock);
2321 if (!mdev->device_up) {
2322 /* NIC is probably restarting - let watchdog task reset
2323 * the port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002324 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002325 } else {
Amir Vadai3484aac2013-01-30 23:07:11 +00002326 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002327 err = mlx4_en_start_port(dev);
2328 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002329 en_err(priv, "Failed restarting port:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002330 priv->port);
2331 queue_work(mdev->workqueue, &priv->watchdog_task);
2332 }
2333 }
2334 mutex_unlock(&mdev->state_lock);
2335 }
2336 return 0;
2337}
2338
Ben Hutchings100dbda2013-11-18 23:13:31 +00002339static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
Amir Vadaiec693d42013-04-23 06:06:49 +00002340{
2341 struct mlx4_en_priv *priv = netdev_priv(dev);
2342 struct mlx4_en_dev *mdev = priv->mdev;
2343 struct hwtstamp_config config;
2344
2345 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2346 return -EFAULT;
2347
2348 /* reserved for future extensions */
2349 if (config.flags)
2350 return -EINVAL;
2351
2352 /* device doesn't support time stamping */
2353 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2354 return -EINVAL;
2355
2356 /* TX HW timestamp */
2357 switch (config.tx_type) {
2358 case HWTSTAMP_TX_OFF:
2359 case HWTSTAMP_TX_ON:
2360 break;
2361 default:
2362 return -ERANGE;
2363 }
2364
2365 /* RX HW timestamp */
2366 switch (config.rx_filter) {
2367 case HWTSTAMP_FILTER_NONE:
2368 break;
2369 case HWTSTAMP_FILTER_ALL:
2370 case HWTSTAMP_FILTER_SOME:
2371 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2372 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2373 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2374 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2375 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2376 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2377 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2378 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2379 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2380 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2381 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2382 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Miroslav Lichvare3412572017-05-19 17:52:36 +02002383 case HWTSTAMP_FILTER_NTP_ALL:
Amir Vadaiec693d42013-04-23 06:06:49 +00002384 config.rx_filter = HWTSTAMP_FILTER_ALL;
2385 break;
2386 default:
2387 return -ERANGE;
2388 }
2389
Saeed Mahameed7787fa62014-10-27 11:37:42 +02002390 if (mlx4_en_reset_config(dev, config, dev->features)) {
Amir Vadaiec693d42013-04-23 06:06:49 +00002391 config.tx_type = HWTSTAMP_TX_OFF;
2392 config.rx_filter = HWTSTAMP_FILTER_NONE;
2393 }
2394
2395 return copy_to_user(ifr->ifr_data, &config,
2396 sizeof(config)) ? -EFAULT : 0;
2397}
2398
Ben Hutchings100dbda2013-11-18 23:13:31 +00002399static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2400{
2401 struct mlx4_en_priv *priv = netdev_priv(dev);
2402
2403 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2404 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2405}
2406
Amir Vadaiec693d42013-04-23 06:06:49 +00002407static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2408{
2409 switch (cmd) {
2410 case SIOCSHWTSTAMP:
Ben Hutchings100dbda2013-11-18 23:13:31 +00002411 return mlx4_en_hwtstamp_set(dev, ifr);
2412 case SIOCGHWTSTAMP:
2413 return mlx4_en_hwtstamp_get(dev, ifr);
Amir Vadaiec693d42013-04-23 06:06:49 +00002414 default:
2415 return -EOPNOTSUPP;
2416 }
2417}
2418
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002419static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2420 netdev_features_t features)
2421{
2422 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2423 struct mlx4_en_dev *mdev = en_priv->mdev;
2424
2425 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
2426	 * enable/disable, make sure the S-TAG flag is always in the same
2427	 * state as the C-TAG.
2428 */
2429 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2430 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2431 features |= NETIF_F_HW_VLAN_STAG_RX;
2432 else
2433 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2434
2435 return features;
2436}
2437
Amir Vadai60d6fe92011-11-26 19:55:19 +00002438static int mlx4_en_set_features(struct net_device *netdev,
2439 netdev_features_t features)
2440{
2441 struct mlx4_en_priv *priv = netdev_priv(netdev);
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002442 bool reset = false;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002443 int ret = 0;
2444
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002445 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2446 en_info(priv, "Turn %s RX-FCS\n",
2447 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2448 reset = true;
2449 }
2450
Muhammad Mahajna78500b82015-04-02 16:31:22 +03002451 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2452 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2453
2454 en_info(priv, "Turn %s RX-ALL\n",
2455 ignore_fcs_value ? "ON" : "OFF");
2456 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2457 priv->port, ignore_fcs_value);
2458 if (ret)
2459 return ret;
2460 }
2461
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002462 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2463 en_info(priv, "Turn %s RX vlan strip offload\n",
2464 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002465 reset = true;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002466 }
Amir Vadai60d6fe92011-11-26 19:55:19 +00002467
Ido Shamaycfb53f32015-02-03 17:57:21 +02002468 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2469 en_info(priv, "Turn %s TX vlan strip offload\n",
2470 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2471
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002472 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2473 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2474 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2475
Ido Shamay241a08c2015-04-02 16:31:07 +03002476 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2477 en_info(priv, "Turn %s loopback\n",
2478 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2479 mlx4_en_update_loopback_state(netdev, features);
2480 }
Yan Burman79aeacc2013-02-07 02:25:19 +00002481
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002482 if (reset) {
2483 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2484 features);
2485 if (ret)
2486 return ret;
2487 }
Amir Vadai60d6fe92011-11-26 19:55:19 +00002488
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03002489 return 0;
Amir Vadai60d6fe92011-11-26 19:55:19 +00002490}
2491
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002492static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2493{
2494 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2495 struct mlx4_en_dev *mdev = en_priv->mdev;
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002496
Eugenia Emantayev745d8ae2017-02-23 12:02:42 +02002497 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002498}
2499
Moshe Shemesh79aab092016-09-22 12:11:15 +03002500static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
2501 __be16 vlan_proto)
Rony Efraim3f7fb022013-04-25 05:22:28 +00002502{
2503 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2504 struct mlx4_en_dev *mdev = en_priv->mdev;
2505
Moshe Shemeshb42959d2016-09-22 12:11:16 +03002506 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
2507 vlan_proto);
Rony Efraim3f7fb022013-04-25 05:22:28 +00002508}
2509
Ido Shamaycda373f2015-04-02 16:31:16 +03002510static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2511 int max_tx_rate)
2512{
2513 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2514 struct mlx4_en_dev *mdev = en_priv->mdev;
2515
2516 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2517 max_tx_rate);
2518}
2519
Rony Efraime6b6a232013-04-25 05:22:29 +00002520static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2521{
2522 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2523 struct mlx4_en_dev *mdev = en_priv->mdev;
2524
2525 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2526}
2527
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002528static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2529{
2530 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2531 struct mlx4_en_dev *mdev = en_priv->mdev;
2532
2533 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2534}
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002535
Rony Efraim948e3062013-06-13 13:19:11 +03002536static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2537{
2538 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2539 struct mlx4_en_dev *mdev = en_priv->mdev;
2540
2541 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2542}
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002543
Eran Ben Elisha62a89052015-06-15 17:59:08 +03002544static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2545 struct ifla_vf_stats *vf_stats)
2546{
2547 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2548 struct mlx4_en_dev *mdev = en_priv->mdev;
2549
2550 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2551}
2552
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002553#define PORT_ID_BYTE_LEN 8
2554static int mlx4_en_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01002555 struct netdev_phys_item_id *ppid)
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002556{
2557 struct mlx4_en_priv *priv = netdev_priv(dev);
2558 struct mlx4_dev *mdev = priv->mdev->dev;
2559 int i;
2560 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2561
2562 if (!phys_port_id)
2563 return -EOPNOTSUPP;
2564
2565 ppid->id_len = sizeof(phys_port_id);
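	/* Emit the 64-bit port id most-significant byte first. */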
2566 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2567 ppid->id[i] = phys_port_id & 0xff;
2568 phys_port_id >>= 8;
2569 }
2570 return 0;
2571}
2572
Or Gerlitz1b136de2014-03-27 14:02:04 +02002573static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2574{
2575 int ret;
2576 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2577 vxlan_add_task);
2578
2579 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2580 if (ret)
2581 goto out;
2582
2583 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2584 VXLAN_STEER_BY_OUTER_MAC, 1);
2585out:
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002586 if (ret) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02002587 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002588 return;
2589 }
2590
2591 /* set offloads */
Alexander Duyck09067122016-05-02 09:38:37 -07002592 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2593 NETIF_F_RXCSUM |
2594 NETIF_F_TSO | NETIF_F_TSO6 |
2595 NETIF_F_GSO_UDP_TUNNEL |
Alexander Duyck3c9346b2016-05-02 09:38:30 -07002596 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2597 NETIF_F_GSO_PARTIAL;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002598}
2599
2600static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2601{
2602 int ret;
2603 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2604 vxlan_del_task);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002605 /* unset offloads */
Alexander Duyck09067122016-05-02 09:38:37 -07002606 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2607 NETIF_F_RXCSUM |
2608 NETIF_F_TSO | NETIF_F_TSO6 |
2609 NETIF_F_GSO_UDP_TUNNEL |
Alexander Duyck3c9346b2016-05-02 09:38:30 -07002610 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2611 NETIF_F_GSO_PARTIAL);
Or Gerlitz1b136de2014-03-27 14:02:04 +02002612
2613 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2614 VXLAN_STEER_BY_OUTER_MAC, 0);
2615 if (ret)
2616 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2617
2618 priv->vxlan_port = 0;
2619}
2620
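/* The device steers VXLAN by a single UDP destination port, so only one
 * offloaded vxlan port is supported at a time; attempts to add a second,
 * different port are rejected with a warning.
 */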
2621static void mlx4_en_add_vxlan_port(struct net_device *dev,
Alexander Duycka8312742016-06-16 12:22:30 -07002622 struct udp_tunnel_info *ti)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002623{
2624 struct mlx4_en_priv *priv = netdev_priv(dev);
Alexander Duycka8312742016-06-16 12:22:30 -07002625 __be16 port = ti->port;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002626 __be16 current_port;
2627
Alexander Duycka8312742016-06-16 12:22:30 -07002628 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002629 return;
2630
Alexander Duycka8312742016-06-16 12:22:30 -07002631 if (ti->sa_family != AF_INET)
2632 return;
2633
2634 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002635 return;
2636
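	/* The device tracks a single VXLAN UDP port at a time; refuse to
	 * replace a port that is already configured.
	 */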
2637 current_port = priv->vxlan_port;
2638 if (current_port && current_port != port) {
2639		en_warn(priv, "vxlan port %d is already configured, can't add port %d\n",
2640 ntohs(current_port), ntohs(port));
2641 return;
2642 }
2643
2644 priv->vxlan_port = port;
2645 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2646}
2647
2648static void mlx4_en_del_vxlan_port(struct net_device *dev,
Alexander Duycka8312742016-06-16 12:22:30 -07002649 struct udp_tunnel_info *ti)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002650{
2651 struct mlx4_en_priv *priv = netdev_priv(dev);
Alexander Duycka8312742016-06-16 12:22:30 -07002652 __be16 port = ti->port;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002653 __be16 current_port;
2654
Alexander Duycka8312742016-06-16 12:22:30 -07002655 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002656 return;
2657
Alexander Duycka8312742016-06-16 12:22:30 -07002658 if (ti->sa_family != AF_INET)
2659 return;
2660
2661 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002662 return;
2663
2664 current_port = priv->vxlan_port;
2665 if (current_port != port) {
2666 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2667 return;
2668 }
2669
2670 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2671}
Joe Stringer956bdab2014-11-13 16:38:14 -08002672
Jesse Gross5f352272014-12-23 22:37:26 -08002673static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2674 struct net_device *dev,
2675 netdev_features_t features)
Joe Stringer956bdab2014-11-13 16:38:14 -08002676{
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002677 features = vlan_features_check(skb, features);
Alexander Duyck09067122016-05-02 09:38:37 -07002678 features = vxlan_features_check(skb, features);
2679
2680 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
2681 * support inner IPv6 checksums and segmentation so we need to
2682 * strip that feature if this is an IPv6 encapsulated frame.
2683 */
2684 if (skb->encapsulation &&
Alexander Duycka5472242016-06-15 14:42:11 -07002685 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2686 struct mlx4_en_priv *priv = netdev_priv(dev);
2687
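		/* Keep HW checksum/GSO only for frames addressed to the
		 * configured (IPv4) VXLAN port; anything else must fall back
		 * to software.
		 */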
2688 if (!priv->vxlan_port ||
2689 (ip_hdr(skb)->version != 4) ||
2690 (udp_hdr(skb)->dest != priv->vxlan_port))
2691 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2692 }
Alexander Duyck09067122016-05-02 09:38:37 -07002693
2694 return features;
Joe Stringer956bdab2014-11-13 16:38:14 -08002695}
Or Gerlitz1b136de2014-03-27 14:02:04 +02002696
Wu Fengguangde1cf8a2015-03-19 08:51:27 +08002697static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002698{
2699 struct mlx4_en_priv *priv = netdev_priv(dev);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002700 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002701 struct mlx4_update_qp_params params;
2702 int err;
2703
2704 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2705 return -EOPNOTSUPP;
2706
2707	/* rate is provided to us in Mb/s; check if it fits into 12 bits, otherwise use Gb/s */
2708 if (maxrate >> 12) {
2709 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2710 params.rate_val = maxrate / 1000;
2711 } else if (maxrate) {
2712 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2713 params.rate_val = maxrate;
2714	} else { /* zero revokes the QP rate limit */
2715 params.rate_unit = 0;
2716 params.rate_val = 0;
2717 }
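	/* e.g. maxrate = 5000 (Mb/s) does not fit in 12 bits (max 4095), so
	 * it is programmed as 5 in the Gb/s unit; maxrate = 3000 fits and is
	 * programmed as-is in the Mb/s unit.
	 */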
2718
2719 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2720 &params);
2721 return err;
2722}
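/* The per-queue rate limit is normally driven from user space through the
 * tx_maxrate sysfs attribute, e.g. (hypothetical device/queue names):
 *   echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */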
2723
Brenden Blanco47a38e12016-07-19 12:16:50 -07002724static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2725{
2726 struct mlx4_en_priv *priv = netdev_priv(dev);
Brenden Blancod576acf2016-07-19 12:16:52 -07002727 struct mlx4_en_dev *mdev = priv->mdev;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002728 struct mlx4_en_port_profile new_prof;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002729 struct bpf_prog *old_prog;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002730 struct mlx4_en_priv *tmp;
2731 int tx_changed = 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002732 int xdp_ring_num;
Brenden Blancod576acf2016-07-19 12:16:52 -07002733 int port_up = 0;
2734 int err;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002735 int i;
2736
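	/* XDP_TX uses a dedicated TX ring per RX ring, so enabling a program
	 * asks for as many XDP TX rings as there are RX rings.
	 */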
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002737 xdp_ring_num = prog ? priv->rx_ring_num : 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002738
Brenden Blancod576acf2016-07-19 12:16:52 -07002739 /* No need to reconfigure buffers when simply swapping the
2740 * program for a new one.
2741 */
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002742 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
Brenden Blancod576acf2016-07-19 12:16:52 -07002743 if (prog) {
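			/* the caller holds one reference on @prog; take
			 * rx_ring_num - 1 more so every RX ring owns one
			 */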
2744 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
2745 if (IS_ERR(prog))
2746 return PTR_ERR(prog);
2747 }
Brenden Blanco326fe022016-09-03 21:29:58 -07002748 mutex_lock(&mdev->state_lock);
Brenden Blancod576acf2016-07-19 12:16:52 -07002749 for (i = 0; i < priv->rx_ring_num; i++) {
Brenden Blanco326fe022016-09-03 21:29:58 -07002750 old_prog = rcu_dereference_protected(
2751 priv->rx_ring[i]->xdp_prog,
2752 lockdep_is_held(&mdev->state_lock));
2753 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
Brenden Blancod576acf2016-07-19 12:16:52 -07002754 if (old_prog)
2755 bpf_prog_put(old_prog);
2756 }
Brenden Blanco326fe022016-09-03 21:29:58 -07002757 mutex_unlock(&mdev->state_lock);
Brenden Blancod576acf2016-07-19 12:16:52 -07002758 return 0;
2759 }
2760
Martin KaFai Laub45f0672016-12-07 15:53:12 -08002761 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
Brenden Blanco47a38e12016-07-19 12:16:50 -07002762 return -EOPNOTSUPP;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002763
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002764 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2765 if (!tmp)
2766 return -ENOMEM;
Brenden Blanco9ecc2d82016-07-19 12:16:55 -07002767
Brenden Blanco47a38e12016-07-19 12:16:50 -07002768 if (prog) {
2769 prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002770 if (IS_ERR(prog)) {
2771 err = PTR_ERR(prog);
2772 goto out;
2773 }
Brenden Blanco47a38e12016-07-19 12:16:50 -07002774 }
2775
Brenden Blancod576acf2016-07-19 12:16:52 -07002776 mutex_lock(&mdev->state_lock);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002777 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
2778 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
2779
2780 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
2781 tx_changed = 1;
2782 new_prof.tx_ring_num[TX] =
2783 MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
2784		en_warn(priv, "Reducing the number of TX rings so the total does not exceed the maximum number of rings.\n");
2785 }
2786
Martin KaFai Lau770f8222017-01-31 22:35:33 -08002787 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
Daniel Borkmannc5405942016-11-09 22:02:34 +01002788 if (err) {
2789 if (prog)
2790 bpf_prog_sub(prog, priv->rx_ring_num - 1);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002791 goto unlock_out;
Daniel Borkmannc5405942016-11-09 22:02:34 +01002792 }
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002793
Brenden Blancod576acf2016-07-19 12:16:52 -07002794 if (priv->port_up) {
2795 port_up = 1;
2796 mlx4_en_stop_port(dev, 1);
2797 }
2798
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002799 mlx4_en_safe_replace_resources(priv, tmp);
2800 if (tx_changed)
2801 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
Brenden Blanco47a38e12016-07-19 12:16:50 -07002802
Brenden Blanco47a38e12016-07-19 12:16:50 -07002803 for (i = 0; i < priv->rx_ring_num; i++) {
Brenden Blanco326fe022016-09-03 21:29:58 -07002804 old_prog = rcu_dereference_protected(
2805 priv->rx_ring[i]->xdp_prog,
2806 lockdep_is_held(&mdev->state_lock));
2807 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
Brenden Blanco47a38e12016-07-19 12:16:50 -07002808 if (old_prog)
2809 bpf_prog_put(old_prog);
2810 }
2811
Brenden Blancod576acf2016-07-19 12:16:52 -07002812 if (port_up) {
2813 err = mlx4_en_start_port(dev);
2814 if (err) {
2815 en_err(priv, "Failed starting port %d for XDP change\n",
2816 priv->port);
2817 queue_work(mdev->workqueue, &priv->watchdog_task);
2818 }
2819 }
2820
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002821unlock_out:
Brenden Blancod576acf2016-07-19 12:16:52 -07002822 mutex_unlock(&mdev->state_lock);
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02002823out:
2824 kfree(tmp);
2825 return err;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002826}
2827
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002828static u32 mlx4_xdp_query(struct net_device *dev)
Brenden Blanco47a38e12016-07-19 12:16:50 -07002829{
2830 struct mlx4_en_priv *priv = netdev_priv(dev);
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002831 struct mlx4_en_dev *mdev = priv->mdev;
2832 const struct bpf_prog *xdp_prog;
2833 u32 prog_id = 0;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002834
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002835 if (!priv->tx_ring_num[TX_XDP])
2836 return prog_id;
2837
2838 mutex_lock(&mdev->state_lock);
2839 xdp_prog = rcu_dereference_protected(
2840 priv->rx_ring[0]->xdp_prog,
2841 lockdep_is_held(&mdev->state_lock));
2842 if (xdp_prog)
2843 prog_id = xdp_prog->aux->id;
2844 mutex_unlock(&mdev->state_lock);
2845
2846 return prog_id;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002847}
2848
2849static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
2850{
2851 switch (xdp->command) {
2852 case XDP_SETUP_PROG:
2853 return mlx4_xdp_set(dev, xdp->prog);
2854 case XDP_QUERY_PROG:
Martin KaFai Lau2e37e9b2017-06-15 17:29:10 -07002855 xdp->prog_id = mlx4_xdp_query(dev);
2856 xdp->prog_attached = !!xdp->prog_id;
Brenden Blanco47a38e12016-07-19 12:16:50 -07002857 return 0;
2858 default:
2859 return -EINVAL;
2860 }
2861}
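/* Attach/detach is normally driven from user space, e.g. with iproute2
 * (hypothetical interface and object names):
 *   ip link set dev eth0 xdp obj prog.o    # XDP_SETUP_PROG
 *   ip link set dev eth0 xdp off
 */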
2862
Stephen Hemminger3addc562008-11-21 17:30:58 -08002863static const struct net_device_ops mlx4_netdev_ops = {
2864 .ndo_open = mlx4_en_open,
2865 .ndo_stop = mlx4_en_close,
2866 .ndo_start_xmit = mlx4_en_xmit,
Yevgeny Petrilinf813cad2009-06-01 23:24:07 +00002867 .ndo_select_queue = mlx4_en_select_queue,
Eric Dumazet9ed17db172016-05-25 09:50:38 -07002868 .ndo_get_stats64 = mlx4_en_get_stats64,
Yan Burman0eb74fd2013-02-07 02:25:23 +00002869 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002870 .ndo_set_mac_address = mlx4_en_set_mac,
Stephen Hemminger52255bb2009-01-09 10:45:37 +00002871 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002872 .ndo_change_mtu = mlx4_en_change_mtu,
Amir Vadaiec693d42013-04-23 06:06:49 +00002873 .ndo_do_ioctl = mlx4_en_ioctl,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002874 .ndo_tx_timeout = mlx4_en_tx_timeout,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002875 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2876 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2877#ifdef CONFIG_NET_POLL_CONTROLLER
2878 .ndo_poll_controller = mlx4_en_netpoll,
2879#endif
Amir Vadai60d6fe92011-11-26 19:55:19 +00002880 .ndo_set_features = mlx4_en_set_features,
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002881 .ndo_fix_features = mlx4_en_fix_features,
John Fastabende4c67342016-02-16 21:16:15 -08002882 .ndo_setup_tc = __mlx4_en_setup_tc,
Amir Vadai1eb8c692012-07-18 22:33:52 +00002883#ifdef CONFIG_RFS_ACCEL
2884 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2885#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002886 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Alexander Duycka8312742016-06-16 12:22:30 -07002887 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2888 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002889 .ndo_features_check = mlx4_en_features_check,
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002890 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Brenden Blanco47a38e12016-07-19 12:16:50 -07002891 .ndo_xdp = mlx4_xdp,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002892};
2893
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002894static const struct net_device_ops mlx4_netdev_ops_master = {
2895 .ndo_open = mlx4_en_open,
2896 .ndo_stop = mlx4_en_close,
2897 .ndo_start_xmit = mlx4_en_xmit,
2898 .ndo_select_queue = mlx4_en_select_queue,
Eric Dumazet9ed17db172016-05-25 09:50:38 -07002899 .ndo_get_stats64 = mlx4_en_get_stats64,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002900 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2901 .ndo_set_mac_address = mlx4_en_set_mac,
2902 .ndo_validate_addr = eth_validate_addr,
2903 .ndo_change_mtu = mlx4_en_change_mtu,
2904 .ndo_tx_timeout = mlx4_en_tx_timeout,
2905 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2906 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2907 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
Rony Efraim3f7fb022013-04-25 05:22:28 +00002908 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
Ido Shamaycda373f2015-04-02 16:31:16 +03002909 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
Rony Efraime6b6a232013-04-25 05:22:29 +00002910 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
Rony Efraim948e3062013-06-13 13:19:11 +03002911 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
Eran Ben Elisha62a89052015-06-15 17:59:08 +03002912 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002913 .ndo_get_vf_config = mlx4_en_get_vf_config,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002914#ifdef CONFIG_NET_POLL_CONTROLLER
2915 .ndo_poll_controller = mlx4_en_netpoll,
2916#endif
2917 .ndo_set_features = mlx4_en_set_features,
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03002918 .ndo_fix_features = mlx4_en_fix_features,
John Fastabende4c67342016-02-16 21:16:15 -08002919 .ndo_setup_tc = __mlx4_en_setup_tc,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002920#ifdef CONFIG_RFS_ACCEL
2921 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2922#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002923 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Alexander Duycka8312742016-06-16 12:22:30 -07002924 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2925 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002926 .ndo_features_check = mlx4_en_features_check,
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002927 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Brenden Blanco47a38e12016-07-19 12:16:50 -07002928 .ndo_xdp = mlx4_xdp,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002929};
2930
Moni Shoua5da03542015-02-03 16:48:34 +02002931struct mlx4_en_bond {
2932 struct work_struct work;
2933 struct mlx4_en_priv *priv;
2934 int is_bonded;
2935 struct mlx4_port_map port_map;
2936};
2937
2938static void mlx4_en_bond_work(struct work_struct *work)
2939{
2940 struct mlx4_en_bond *bond = container_of(work,
2941 struct mlx4_en_bond,
2942 work);
2943 int err = 0;
2944 struct mlx4_dev *dev = bond->priv->mdev->dev;
2945
2946 if (bond->is_bonded) {
2947 if (!mlx4_is_bonded(dev)) {
2948 err = mlx4_bond(dev);
2949 if (err)
2950				en_err(bond->priv, "Failed to bond device\n");
2951 }
2952 if (!err) {
2953 err = mlx4_port_map_set(dev, &bond->port_map);
2954 if (err)
2955				en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2956 bond->port_map.port1,
2957 bond->port_map.port2,
2958 err);
2959 }
2960 } else if (mlx4_is_bonded(dev)) {
2961 err = mlx4_unbond(dev);
2962 if (err)
2963			en_err(bond->priv, "Failed to unbond device\n");
2964 }
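	/* drop the reference taken in mlx4_en_queue_bond_work() */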
2965 dev_put(bond->priv->dev);
2966 kfree(bond);
2967}
2968
2969static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2970 u8 v2p_p1, u8 v2p_p2)
2971{
2972 struct mlx4_en_bond *bond = NULL;
2973
2974 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2975 if (!bond)
2976 return -ENOMEM;
2977
2978 INIT_WORK(&bond->work, mlx4_en_bond_work);
2979 bond->priv = priv;
2980 bond->is_bonded = is_bonded;
2981 bond->port_map.port1 = v2p_p1;
2982 bond->port_map.port2 = v2p_p2;
2983 dev_hold(priv->dev);
2984 queue_work(priv->mdev->workqueue, &bond->work);
2985 return 0;
2986}
2987
2988int mlx4_en_netdev_event(struct notifier_block *this,
2989 unsigned long event, void *ptr)
2990{
2991 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2992 u8 port = 0;
2993 struct mlx4_en_dev *mdev;
2994 struct mlx4_dev *dev;
2995 int i, num_eth_ports = 0;
2996 bool do_bond = true;
2997 struct mlx4_en_priv *priv;
2998 u8 v2p_port1 = 0;
2999 u8 v2p_port2 = 0;
3000
3001 if (!net_eq(dev_net(ndev), &init_net))
3002 return NOTIFY_DONE;
3003
3004 mdev = container_of(this, struct mlx4_en_dev, nb);
3005 dev = mdev->dev;
3006
3007	/* Go into bonded mode only when the two network devices on the two
3008	 * ports of the same mlx4 device are slaves of the same bonding master
3009 */
3010 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
3011 ++num_eth_ports;
3012 if (!port && (mdev->pndev[i] == ndev))
3013 port = i;
3014 mdev->upper[i] = mdev->pndev[i] ?
3015 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
3016		/* bonding requires each port's netdev to be a slave */
3017 if (!mdev->upper[i])
3018 do_bond = false;
3019 if (num_eth_ports < 2)
3020 continue;
3021		/* bonding requires both slaves to share the same master */
3022 if (mdev->upper[i] != mdev->upper[i-1])
3023 do_bond = false;
3024 }
3025	/* bonding requires exactly 2 slaves */
3026 do_bond = (num_eth_ports == 2) ? do_bond : false;
3027
3028 /* handle only events that come with enough info */
3029 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
3030 return NOTIFY_DONE;
3031
3032 priv = netdev_priv(ndev);
3033 if (do_bond) {
3034 struct netdev_notifier_bonding_info *notifier_info = ptr;
3035 struct netdev_bonding_info *bonding_info =
3036 &notifier_info->bonding_info;
3037
3038		/* required bond mode: active-backup (1), XOR (2) or 802.3ad (4) */
3039 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
3040 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
3041 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
3042 do_bond = false;
3043
3044 /* require exactly 2 slaves */
3045 if (bonding_info->master.num_slaves != 2)
3046 do_bond = false;
3047
3048		/* compute the virtual-to-physical (v2p) port mapping */
3049 if (do_bond) {
3050 if (bonding_info->master.bond_mode ==
3051 BOND_MODE_ACTIVEBACKUP) {
3052 /* in active-backup mode virtual ports are
3053 * mapped to the physical port of the active
3054 * slave */
3055 if (bonding_info->slave.state ==
3056 BOND_STATE_BACKUP) {
3057 if (port == 1) {
3058 v2p_port1 = 2;
3059 v2p_port2 = 2;
3060 } else {
3061 v2p_port1 = 1;
3062 v2p_port2 = 1;
3063 }
3064 } else { /* BOND_STATE_ACTIVE */
3065 if (port == 1) {
3066 v2p_port1 = 1;
3067 v2p_port2 = 1;
3068 } else {
3069 v2p_port1 = 2;
3070 v2p_port2 = 2;
3071 }
3072 }
3073 } else { /* Active-Active */
3074 /* in active-active mode a virtual port is
3075 * mapped to the native physical port if and only
3076 * if the physical port is up */
3077 __s8 link = bonding_info->slave.link;
3078
3079 if (port == 1)
3080 v2p_port2 = 2;
3081 else
3082 v2p_port1 = 1;
3083 if ((link == BOND_LINK_UP) ||
3084 (link == BOND_LINK_FAIL)) {
3085 if (port == 1)
3086 v2p_port1 = 1;
3087 else
3088 v2p_port2 = 2;
3089 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
3090 if (port == 1)
3091 v2p_port1 = 2;
3092 else
3093 v2p_port2 = 1;
3094 }
3095 }
3096 }
3097 }
3098
3099 mlx4_en_queue_bond_work(priv, do_bond,
3100 v2p_port1, v2p_port2);
3101
3102 return NOTIFY_DONE;
3103}
3104
Matan Barak0b131562015-03-30 17:45:25 +03003105void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
3106 struct mlx4_en_stats_bitmap *stats_bitmap,
3107 u8 rx_ppp, u8 rx_pause,
3108 u8 tx_ppp, u8 tx_pause)
3109{
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03003110 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
Matan Barak0b131562015-03-30 17:45:25 +03003111
3112 if (!mlx4_is_slave(dev) &&
3113 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
3114 mutex_lock(&stats_bitmap->mutex);
3115 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
3116
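		/* Per-priority (PPP) and global pause counters are exposed
		 * mutually exclusively: the PFC set when PPP is enabled, the
		 * plain pause set otherwise.
		 */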
3117 if (rx_ppp)
3118 bitmap_set(stats_bitmap->bitmap, last_i,
3119 NUM_FLOW_PRIORITY_STATS_RX);
3120 last_i += NUM_FLOW_PRIORITY_STATS_RX;
3121
3122 if (rx_pause && !(rx_ppp))
3123 bitmap_set(stats_bitmap->bitmap, last_i,
3124 NUM_FLOW_STATS_RX);
3125 last_i += NUM_FLOW_STATS_RX;
3126
3127 if (tx_ppp)
3128 bitmap_set(stats_bitmap->bitmap, last_i,
3129 NUM_FLOW_PRIORITY_STATS_TX);
3130 last_i += NUM_FLOW_PRIORITY_STATS_TX;
3131
3132 if (tx_pause && !(tx_ppp))
3133 bitmap_set(stats_bitmap->bitmap, last_i,
3134 NUM_FLOW_STATS_TX);
3135 last_i += NUM_FLOW_STATS_TX;
3136
3137 mutex_unlock(&stats_bitmap->mutex);
3138 }
3139}
3140
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003141void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
Matan Barak0b131562015-03-30 17:45:25 +03003142 struct mlx4_en_stats_bitmap *stats_bitmap,
3143 u8 rx_ppp, u8 rx_pause,
3144 u8 tx_ppp, u8 tx_pause)
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003145{
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003146 int last_i = 0;
3147
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003148 mutex_init(&stats_bitmap->mutex);
3149 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
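	/* Bits are laid out in the global stats order:
	 * MAIN | PORT | PF | FLOW (pause/PFC) | PKT | XDP
	 */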
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003150
3151 if (mlx4_is_slave(dev)) {
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003152 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003153 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003154 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003155 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003156 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003157 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003158 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003159 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003160 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003161 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003162 bitmap_set(stats_bitmap->bitmap, last_i +
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003163 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
3164 } else {
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003165 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003166 }
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003167 last_i += NUM_MAIN_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003168
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003169 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003170 last_i += NUM_PORT_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003171
Eran Ben Elishab42de4d2015-06-15 17:59:06 +03003172 if (mlx4_is_master(dev))
3173 bitmap_set(stats_bitmap->bitmap, last_i,
3174 NUM_PF_STATS);
3175 last_i += NUM_PF_STATS;
3176
Matan Barak0b131562015-03-30 17:45:25 +03003177 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
3178 rx_ppp, rx_pause,
3179 tx_ppp, tx_pause);
3180 last_i += NUM_FLOW_STATS;
3181
Eran Ben Elisha6fcd2732015-03-30 17:45:23 +03003182 if (!mlx4_is_slave(dev))
Eran Ben Elisha3da8a362015-03-30 17:45:24 +03003183 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
Tariq Toukan15fca2c2016-11-02 17:12:25 +02003184 last_i += NUM_PKT_STATS;
3185
3186 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
3187 last_i += NUM_XDP_STATS;
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03003188}
3189
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003190int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3191 struct mlx4_en_port_profile *prof)
3192{
3193 struct net_device *dev;
3194 struct mlx4_en_priv *priv;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003195 int i, t;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003196 int err;
3197
Tom Herbertf1593d22011-01-09 19:36:36 +00003198 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
Amir Vadaid3179662012-12-02 03:49:23 +00003199 MAX_TX_RINGS, MAX_RX_RINGS);
Joe Perches41de8d42012-01-29 13:47:52 +00003200 if (dev == NULL)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003201 return -ENOMEM;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003202
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003203 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
Amir Vadaid3179662012-12-02 03:49:23 +00003204 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
3205
Yishai Hadas872bf2f2015-01-25 16:59:35 +02003206 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
Amir Vadai76a066f2014-02-25 18:17:51 +02003207 dev->dev_port = port - 1;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003208
3209 /*
3210 * Initialize driver private data
3211 */
3212
3213 priv = netdev_priv(dev);
3214 memset(priv, 0, sizeof(struct mlx4_en_priv));
Eran Ben Elisha6de5f7f2015-06-15 17:59:02 +03003215 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003216 spin_lock_init(&priv->stats_lock);
3217 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3218 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
3219 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3220 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3221 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003222 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
3223 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
Eugenia Emantayev207af6c2014-10-27 11:37:46 +02003224#ifdef CONFIG_RFS_ACCEL
3225 INIT_LIST_HEAD(&priv->filters);
3226 spin_lock_init(&priv->filters_lock);
3227#endif
3228
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003229 priv->dev = dev;
3230 priv->mdev = mdev;
Yevgeny Petrilinebf8c9a2012-03-06 04:03:34 +00003231 priv->ddev = &mdev->pdev->dev;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003232 priv->prof = prof;
3233 priv->port = port;
3234 priv->port_up = false;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003235 priv->flags = prof->flags;
Amir Vadai0fef9d02014-07-22 15:44:10 +03003236 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
Amir Vadai60d6fe92011-11-26 19:55:19 +00003237 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
3238 MLX4_WQE_CTRL_SOLICITED);
Amir Vadaid3179662012-12-02 03:49:23 +00003239 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
Amir Vadaifbc6daf2014-07-08 11:28:12 +03003240 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
Eric Dumazetbd635c32014-11-22 17:24:19 -08003241 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
Amir Vadaid3179662012-12-02 03:49:23 +00003242
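	/* Allocate ring and CQ pointer arrays for each TX type in the
	 * profile (regular TX and, when enabled, XDP_TX).
	 */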
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003243 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
3244 priv->tx_ring_num[t] = prof->tx_ring_num[t];
3245 if (!priv->tx_ring_num[t])
3246 continue;
3247
3248 priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
3249 MAX_TX_RINGS, GFP_KERNEL);
3250 if (!priv->tx_ring[t]) {
3251 err = -ENOMEM;
3252 goto err_free_tx;
3253 }
3254 priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
3255 MAX_TX_RINGS, GFP_KERNEL);
3256 if (!priv->tx_cq[t]) {
3257 kfree(priv->tx_ring[t]);
3258 err = -ENOMEM;
3259 goto out;
3260 }
Amir Vadaibc6a4742012-05-17 00:58:10 +00003261 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003262 priv->rx_ring_num = prof->rx_ring_num;
Or Gerlitz08ff3232012-10-21 14:59:24 +00003263 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
Ido Shamayb1b6b4d2014-09-18 11:51:01 +03003264 priv->cqe_size = mdev->dev->caps.cqe_size;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003265 priv->mac_index = -1;
3266 priv->msg_enable = MLX4_EN_MSG_LEVEL;
Amir Vadai564c2742012-04-04 21:33:26 +00003267#ifdef CONFIG_MLX4_EN_DCB
Or Gerlitz540b3a32013-04-07 03:44:07 +00003268 if (!mlx4_is_slave(priv->mdev->dev)) {
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003269 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3270 DCB_CAP_DCBX_VER_IEEE;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003271 priv->flags |= MLX4_EN_DCB_ENABLED;
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003272 priv->cee_config.pfc_state = false;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003273
Tariq Toukan564ed9b2016-09-11 10:56:19 +03003274 for (i = 0; i < MLX4_EN_NUM_UP; i++)
3275 priv->cee_config.dcb_pfc[i] = pfc_disabled;
Rana Shahoutaf7d5182016-06-21 12:43:59 +03003276
Ido Shamay3742cc62015-04-02 16:31:17 +03003277 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
Or Gerlitz540b3a32013-04-07 03:44:07 +00003278 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
3279 } else {
3280 en_info(priv, "enabling only PFC DCB ops\n");
3281 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
3282 }
3283 }
Amir Vadai564c2742012-04-04 21:33:26 +00003284#endif
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003285
Yan Burmanc07cb4b2013-02-07 02:25:25 +00003286 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
3287 INIT_HLIST_HEAD(&priv->mac_hash[i]);
Yan Burman16a10ff2013-02-07 02:25:22 +00003288
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003289 /* Query for default mac and max mtu */
3290 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
Yan Burman6bbb6d92013-02-07 02:25:20 +00003291
Shani Michaelif8c64552014-11-09 13:51:53 +02003292 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
3293 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
3294 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
3295
Yan Burman6bbb6d92013-02-07 02:25:20 +00003296 /* Set default MAC */
3297 dev->addr_len = ETH_ALEN;
3298 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3299 if (!is_valid_ether_addr(dev->dev_addr)) {
Jack Morgenstein2b3ddf22015-10-14 17:43:48 +03003300		en_err(priv, "Port: %d, invalid MAC burned: %pM, quitting\n",
3301 priv->port, dev->dev_addr);
3302 err = -EINVAL;
3303 goto out;
3304 } else if (mlx4_is_slave(priv->mdev->dev) &&
3305 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
3306 /* Random MAC was assigned in mlx4_slave_cap
3307 * in mlx4_core module
3308 */
3309 dev->addr_assign_type |= NET_ADDR_RANDOM;
3310 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003311 }
3312
Noa Osherovich2695bab2014-07-08 11:25:24 +03003313 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
Yan Burman6bbb6d92013-02-07 02:25:20 +00003314
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003315 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
3316 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
3317 err = mlx4_en_alloc_resources(priv);
3318 if (err)
3319 goto out;
3320
Amir Vadaiec693d42013-04-23 06:06:49 +00003321 /* Initialize time stamping config */
3322 priv->hwtstamp_config.flags = 0;
3323 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3324 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3325
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003326 /* Allocate page for receive rings */
3327 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
Haggai Abramovsky73898db2016-05-04 14:50:15 +03003328 MLX4_EN_PAGE_SIZE);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003329 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00003330 en_err(priv, "Failed to allocate page for rx qps\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003331 goto out;
3332 }
3333 priv->allocated = 1;
3334
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003335 /*
3336 * Initialize netdev entry points
3337 */
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00003338 if (mlx4_is_master(priv->mdev->dev))
3339 dev->netdev_ops = &mlx4_netdev_ops_master;
3340 else
3341 dev->netdev_ops = &mlx4_netdev_ops;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003342 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003343 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
Ben Hutchings1eb63a22010-09-27 08:29:34 +00003344 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
Stephen Hemminger3addc562008-11-21 17:30:58 -08003345
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00003346 dev->ethtool_ops = &mlx4_en_ethtool_ops;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003347
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003348 /*
3349 * Set driver features
3350 */
Michał Mirosławc8c64cf2011-04-15 04:50:49 +00003351 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3352 if (mdev->LSO_support)
3353 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3354
3355 dev->vlan_features = dev->hw_features;
3356
Yevgeny Petrilinad861072011-10-18 01:51:24 +00003357 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
Michał Mirosławc8c64cf2011-04-15 04:50:49 +00003358 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
Patrick McHardyf6469682013-04-19 02:04:27 +00003359 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3360 NETIF_F_HW_VLAN_CTAG_FILTER;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003361 dev->hw_features |= NETIF_F_LOOPBACK |
3362 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003363
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003364 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3365 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
3366 NETIF_F_HW_VLAN_STAG_FILTER;
3367 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
3368 }
3369
3370 if (mlx4_is_slave(mdev->dev)) {
Moshe Shemesh0815fe32016-09-22 12:11:14 +03003371 bool vlan_offload_disabled;
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003372 int phv;
3373
3374 err = get_phv_bit(mdev->dev, port, &phv);
3375 if (!err && phv) {
3376 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3377 priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
3378 }
Moshe Shemesh0815fe32016-09-22 12:11:14 +03003379 err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
3380 &vlan_offload_disabled);
3381 if (!err && vlan_offload_disabled) {
3382 dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3383 NETIF_F_HW_VLAN_CTAG_RX |
3384 NETIF_F_HW_VLAN_STAG_TX |
3385 NETIF_F_HW_VLAN_STAG_RX);
3386 dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3387 NETIF_F_HW_VLAN_CTAG_RX |
3388 NETIF_F_HW_VLAN_STAG_TX |
3389 NETIF_F_HW_VLAN_STAG_RX);
3390 }
Hadar Hen Zione38af4f2015-07-27 14:46:34 +03003391 } else {
3392 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
3393 !(mdev->dev->caps.flags2 &
3394 MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
3395 dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
3396 }
3397
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003398 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
3399 dev->hw_features |= NETIF_F_RXFCS;
3400
Muhammad Mahajna78500b82015-04-02 16:31:22 +03003401 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
3402 dev->hw_features |= NETIF_F_RXALL;
3403
Amir Vadai1eb8c692012-07-18 22:33:52 +00003404 if (mdev->dev->caps.steering_mode ==
Matan Barak7d077cd2014-12-11 10:58:00 +02003405 MLX4_STEERING_MODE_DEVICE_MANAGED &&
3406 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
Amir Vadai1eb8c692012-07-18 22:33:52 +00003407 dev->hw_features |= NETIF_F_NTUPLE;
3408
Yan Burmancc5387f2013-02-07 02:25:26 +00003409 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
3410 dev->priv_flags |= IFF_UNICAST_FLT;
3411
Eyal Perry947cbb02014-12-02 18:12:11 +02003412 /* Setting a default hash function value */
3413 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
3414 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3415 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
3416 priv->rss_hash_fn = ETH_RSS_HASH_XOR;
3417 } else {
3418 en_warn(priv,
3419 "No RSS hash capabilities exposed, using Toeplitz\n");
3420 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
3421 }
3422
Eugenia Emantayev925ab1a2016-02-17 17:24:27 +02003423 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Alexander Duyck3c9346b2016-05-02 09:38:30 -07003424 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3425 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3426 NETIF_F_GSO_PARTIAL;
3427 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3428 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3429 NETIF_F_GSO_PARTIAL;
3430 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
Eugenia Emantayev925ab1a2016-02-17 17:24:27 +02003431 }
3432
Jarod Wilsonb80f71f2016-10-17 15:54:07 -04003433 /* MTU range: 46 - hw-specific max */
3434 dev->min_mtu = MLX4_EN_MIN_MTU;
3435 dev->max_mtu = priv->max_mtu;
3436
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003437 mdev->pndev[port] = dev;
Moni Shoua5da03542015-02-03 16:48:34 +02003438 mdev->upper[port] = NULL;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003439
3440 netif_carrier_off(dev);
Eugenia Emantayev4801ae72013-06-25 12:09:31 +03003441 mlx4_en_set_default_moderation(priv);
3442
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003443 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00003444 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
3445
Yan Burman79aeacc2013-02-07 02:25:19 +00003446 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
3447
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003448 /* Configure port */
Yevgeny Petrilin5c8e9042012-06-25 00:24:11 +00003449 mlx4_en_calc_rx_buf(dev);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003450 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
Yevgeny Petrilin5c8e9042012-06-25 00:24:11 +00003451 priv->rx_skb_size + ETH_FCS_LEN,
3452 prof->tx_pause, prof->tx_ppp,
3453 prof->rx_pause, prof->rx_ppp);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003454 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07003455 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
3456 priv->port, err);
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003457 goto out;
3458 }
3459
Or Gerlitz837052d2013-12-23 16:09:44 +02003460 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02003461 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
Or Gerlitz837052d2013-12-23 16:09:44 +02003462 if (err) {
3463 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
3464 err);
3465 goto out;
3466 }
3467 }
3468
Yevgeny Petrilin90822262011-03-22 22:37:41 +00003469 /* Init port */
3470 en_warn(priv, "Initializing port\n");
3471 err = mlx4_INIT_PORT(mdev->dev, priv->port);
3472 if (err) {
3473 en_err(priv, "Failed Initializing port\n");
3474 goto out;
3475 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003476 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
Amir Vadaidc8142e2013-04-25 05:22:24 +00003477
Eugenia Emantayev90683062015-12-17 15:35:38 +02003478 /* Initialize time stamp mechanism */
Amir Vadaidc8142e2013-04-25 05:22:24 +00003479 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
Eugenia Emantayev90683062015-12-17 15:35:38 +02003480 mlx4_en_init_timestamp(mdev);
3481
Eugenia Emantayevfc9f5ea2015-12-17 15:35:37 +02003482 queue_delayed_work(mdev->workqueue, &priv->service_task,
3483 SERVICE_TASK_DELAY);
Amir Vadaidc8142e2013-04-25 05:22:24 +00003484
Matan Barak0b131562015-03-30 17:45:25 +03003485 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
3486 mdev->profile.prof[priv->port].rx_ppp,
3487 mdev->profile.prof[priv->port].rx_pause,
3488 mdev->profile.prof[priv->port].tx_ppp,
3489 mdev->profile.prof[priv->port].tx_pause);
Eran Ben Elisha39de9612015-03-18 16:51:38 +02003490
Ido Shamaye5eda892015-03-24 15:18:38 +02003491 err = register_netdev(dev);
3492 if (err) {
3493 en_err(priv, "Netdev registration failed for port %d\n", port);
3494 goto out;
3495 }
3496
3497 priv->registered = 1;
Jiri Pirko09d4d082016-02-26 17:32:24 +01003498 devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
3499 dev);
Ido Shamaye5eda892015-03-24 15:18:38 +02003500
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003501 return 0;
3502
Tariq Toukan67f8b1d2016-11-02 17:12:24 +02003503err_free_tx:
3504 while (t--) {
3505 kfree(priv->tx_ring[t]);
3506 kfree(priv->tx_cq[t]);
3507 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07003508out:
3509 mlx4_en_destroy_netdev(dev);
3510 return err;
3511}
3512
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003513int mlx4_en_reset_config(struct net_device *dev,
3514 struct hwtstamp_config ts_config,
3515 netdev_features_t features)
3516{
3517 struct mlx4_en_priv *priv = netdev_priv(dev);
3518 struct mlx4_en_dev *mdev = priv->mdev;
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003519 struct mlx4_en_port_profile new_prof;
3520 struct mlx4_en_priv *tmp;
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003521 int port_up = 0;
3522 int err = 0;
3523
3524 if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
3525 priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003526 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3527 !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003528 return 0; /* Nothing to change */
3529
3530 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
3531 (features & NETIF_F_HW_VLAN_CTAG_RX) &&
3532 (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
3533 en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
3534 return -EINVAL;
3535 }
3536
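	/* Build the new resource set in a scratch priv first; it is swapped
	 * into place only after allocation succeeds, so a failure leaves the
	 * running configuration untouched.
	 */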
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003537 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
3538 if (!tmp)
3539 return -ENOMEM;
3540
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003541 mutex_lock(&mdev->state_lock);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003542
3543 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
3544 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
3545
Martin KaFai Lau770f8222017-01-31 22:35:33 -08003546 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003547 if (err)
3548 goto out;
3549
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003550 if (priv->port_up) {
3551 port_up = 1;
3552 mlx4_en_stop_port(dev, 1);
3553 }
3554
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003555 en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003556 ts_config.rx_filter,
3557 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003558
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003559 mlx4_en_safe_replace_resources(priv, tmp);
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003560
3561 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
3562 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3563 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3564 else
3565 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3566 } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
3567 /* RX time-stamping is OFF, update the RX vlan offload
3568 * to the latest wanted state
3569 */
3570 if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
3571 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3572 else
3573 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3574 }
3575
Muhammad Mahajnaf0df3502015-04-02 16:31:21 +03003576 if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
3577 if (features & NETIF_F_RXFCS)
3578 dev->features |= NETIF_F_RXFCS;
3579 else
3580 dev->features &= ~NETIF_F_RXFCS;
3581 }
3582
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003583	/* RX vlan offload and RX time-stamping can't coexist!
3584	 * Regardless of the caller's choice, turn off RX
3585	 * vlan offload when time-stamping is ON.
3586 */
3587 if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
3588 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
3589 en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
3590 dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3591 }
3592
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003593 if (port_up) {
3594 err = mlx4_en_start_port(dev);
3595 if (err)
3596 en_err(priv, "Failed starting port\n");
3597 }
3598
3599out:
3600 mutex_unlock(&mdev->state_lock);
Eugenia Emantayevec25bc02016-07-18 18:35:12 +03003601 kfree(tmp);
3602 if (!err)
3603 netdev_features_change(dev);
Saeed Mahameed537f6f92014-10-27 11:37:43 +02003604 return err;
3605}