/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x7fe00000ULL

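/* Map each user priority to its own contiguous block of
 * num_tx_rings_p_up Tx queues; up == 0 clears the TC configuration.
 */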
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

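/* RFS worker: build an ETH + IPv4 + TCP/UDP steering rule from the
 * filter's cached 5-tuple, detach any rule previously attached for this
 * filter, and re-attach it toward the QP of the current target Rx ring.
 */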
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

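/* Hash the flow addresses and ports into one of the filter_hash buckets */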
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

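/* ndo_rx_flow_steer() callback: look up (or allocate) the filter for an
 * IPv4 TCP/UDP flow under filters_lock and defer the actual firmware
 * rule update to the driver workqueue.
 */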
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

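/* Free up to MLX4_EN_FILTER_EXPIRY_QUOTA filters that the RFS core no
 * longer references; the filters list is rotated so a later call resumes
 * scanning after the last filter that was kept.
 */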
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

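/* Attach a unicast steering rule for @mac to QP @qpn, in whichever
 * steering mode (B0 or device-managed flow steering) the device is in.
 */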
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

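/* Register the port MAC address and, unless the device is in A0 steering
 * mode, reserve a dedicated QP for it, attach unicast and tunnel (VXLAN)
 * steering rules, and add the address to the mac_hash table.
 */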
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int i;

		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
				mac = mlx4_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
				       entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);

				mlx4_unregister_mac(dev, priv->port, mac);
				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
			}
		}

		if (priv->tunnel_reg_id) {
			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
			priv->tunnel_reg_id = 0;
		}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

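/* Replace prev_mac by new_mac in the registration and steering tables
 * (called with mdev->state_lock held); in A0 steering mode this reduces
 * to __mlx4_replace_mac() on the index-based MAC table.
 */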
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

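/* Diff the newly cached multicast list (src) against the currently
 * programmed one (dst), tagging entries MCLIST_REM, MCLIST_NONE or
 * MCLIST_ADD.
 */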
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * and mark them as needing to be added
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

931 struct mlx4_en_dev *mdev)
932{
933 int err = 0;
934
935 if (netif_msg_rx_status(priv))
936 en_warn(priv, "Leaving promiscuous mode\n");
937 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
938
939 /* Disable promiscouos mode */
940 switch (mdev->dev->caps.steering_mode) {
941 case MLX4_STEERING_MODE_DEVICE_MANAGED:
942 err = mlx4_flow_steer_promisc_remove(mdev->dev,
943 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +0000944 MLX4_FS_ALL_DEFAULT);
Yan Burman0eb74fd2013-02-07 02:25:23 +0000945 if (err)
946 en_err(priv, "Failed disabling promiscuous mode\n");
947 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
948 break;
949
950 case MLX4_STEERING_MODE_B0:
951 err = mlx4_unicast_promisc_remove(mdev->dev,
952 priv->base_qpn,
953 priv->port);
954 if (err)
955 en_err(priv, "Failed disabling unicast promiscuous mode\n");
956 /* Disable Multicast promisc */
957 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
958 err = mlx4_multicast_promisc_remove(mdev->dev,
959 priv->base_qpn,
960 priv->port);
961 if (err)
962 en_err(priv, "Failed disabling multicast promiscuous mode\n");
963 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
964 }
965 break;
966
967 case MLX4_STEERING_MODE_A0:
968 err = mlx4_SET_PORT_qpn_calc(mdev->dev,
969 priv->port,
970 priv->base_qpn, 0);
971 if (err)
972 en_err(priv, "Failed disabling promiscuous mode\n");
973 break;
974 }
Yan Burman0eb74fd2013-02-07 02:25:23 +0000975}
976
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

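/* Sync the hardware unicast filter with the netdev uc address list; if
 * MACs, QPs or steering rules run out, fall back to forced promiscuous
 * mode via MLX4_EN_FLAG_FORCE_PROMISC.
 */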
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

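/* rx_mode_task worker, serialized by mdev->state_lock: refresh carrier
 * state, reprogram the unicast filter, and enter/leave promiscuous and
 * multicast modes to match dev->flags.
 */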
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		napi_schedule(&cq->napi);
	}
}
#endif

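/* ndo_tx_timeout() callback: report stalled Tx queues and schedule the
 * watchdog task to restart the port.
 */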
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

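/* Adaptive Rx coalescing: once per sampling interval, scale each ring's
 * CQ moderation time linearly with its packet rate, between rx_usecs_low
 * and rx_usecs_high; slow or small-packet traffic gets the low value.
 */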
1370static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
1371{
1372 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001373 struct mlx4_en_cq *cq;
1374 unsigned long packets;
1375 unsigned long rate;
1376 unsigned long avg_pkt_size;
1377 unsigned long rx_packets;
1378 unsigned long rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001379 unsigned long rx_pkt_diff;
1380 int moder_time;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001381 int ring, err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001382
1383 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
1384 return;
1385
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001386 for (ring = 0; ring < priv->rx_ring_num; ring++) {
1387 spin_lock_bh(&priv->stats_lock);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001388 rx_packets = priv->rx_ring[ring]->packets;
1389 rx_bytes = priv->rx_ring[ring]->bytes;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001390 spin_unlock_bh(&priv->stats_lock);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001391
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001392 rx_pkt_diff = ((unsigned long) (rx_packets -
1393 priv->last_moder_packets[ring]));
1394 packets = rx_pkt_diff;
1395 rate = packets * HZ / period;
1396 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
1397 priv->last_moder_bytes[ring])) / packets : 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001398
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001399 /* Apply auto-moderation only when packet rate
1400 * exceeds a rate that it matters */
1401 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
1402 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001403 if (rate < priv->pkt_rate_low)
1404 moder_time = priv->rx_usecs_low;
1405 else if (rate > priv->pkt_rate_high)
1406 moder_time = priv->rx_usecs_high;
1407 else
1408 moder_time = (rate - priv->pkt_rate_low) *
1409 (priv->rx_usecs_high - priv->rx_usecs_low) /
1410 (priv->pkt_rate_high - priv->pkt_rate_low) +
1411 priv->rx_usecs_low;
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001412 } else {
1413 moder_time = priv->rx_usecs_low;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001414 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001415
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001416 if (moder_time != priv->last_moder_time[ring]) {
1417 priv->last_moder_time[ring] = moder_time;
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001418 cq = priv->rx_cq[ring];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001419 cq->moder_time = moder_time;
Sagi Grimberga1c66932013-06-04 05:13:26 +00001420 cq->moder_cnt = priv->rx_frames;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001421 err = mlx4_en_set_cq_moder(priv, cq);
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001422 if (err)
Yan Burman48e551f2013-02-07 02:25:21 +00001423 en_err(priv, "Failed modifying moderation for cq:%d\n",
1424 ring);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001425 }
Alexander Guller6b4d8d92011-10-09 05:38:23 +00001426 priv->last_moder_packets[ring] = rx_packets;
1427 priv->last_moder_bytes[ring] = rx_bytes;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001428 }
1429
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001430 priv->last_moder_jiffies = jiffies;
1431}
1432
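/* Periodic stats worker: dump the HW ethernet counters, run the
 * auto-moderation pass while the port is up, re-arm itself every
 * STATS_DELAY, and re-program the unicast MAC if it was removed
 * while the port was down.
 */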
1433static void mlx4_en_do_get_stats(struct work_struct *work)
1434{
Jean Delvarebf6aede2009-04-02 16:56:54 -07001435 struct delayed_work *delay = to_delayed_work(work);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001436 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1437 stats_task);
1438 struct mlx4_en_dev *mdev = priv->mdev;
1439 int err;
1440
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001441 mutex_lock(&mdev->state_lock);
1442 if (mdev->device_up) {
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001443 if (priv->port_up) {
1444 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
1445 if (err)
1446 en_dbg(HW, priv, "Could not update stats\n");
Eugenia Emantayev2d518372013-01-24 01:54:14 +00001447
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001448 mlx4_en_auto_moderation(priv);
Jack Morgenstein6123db2e2013-06-25 12:09:30 +03001449 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001450
1451 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1452 }
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001453 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
Noa Osherovich2695bab2014-07-08 11:25:24 +03001454 mlx4_en_do_set_mac(priv, priv->current_mac);
Yevgeny Petrilind7e1a482010-08-24 03:46:38 +00001455 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1456 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001457 mutex_unlock(&mdev->state_lock);
1458}
1459
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001460/* mlx4_en_service_task - Run service task for tasks that need to be done
1461 * periodically
1462 */
1463static void mlx4_en_service_task(struct work_struct *work)
1464{
1465 struct delayed_work *delay = to_delayed_work(work);
1466 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
1467 service_task);
1468 struct mlx4_en_dev *mdev = priv->mdev;
1469
1470 mutex_lock(&mdev->state_lock);
1471 if (mdev->device_up) {
Amir Vadaidc8142e2013-04-25 05:22:24 +00001472 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
1473 mlx4_en_ptp_overflow_check(mdev);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00001474
1475 queue_delayed_work(mdev->workqueue, &priv->service_task,
1476 SERVICE_TASK_DELAY);
1477 }
1478 mutex_unlock(&mdev->state_lock);
1479}
1480
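/* Link-state worker: propagate the last port event reported by the
 * device into the netdev carrier state and log the transition.
 */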
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001481static void mlx4_en_linkstate(struct work_struct *work)
1482{
1483 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1484 linkstate_task);
1485 struct mlx4_en_dev *mdev = priv->mdev;
1486 int linkstate = priv->link_state;
1487
1488 mutex_lock(&mdev->state_lock);
1489 /* If the observable port state changed, set the carrier state and
1490 * report it to the system log */
1491 if (priv->last_link_state != linkstate) {
1492 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001493 en_info(priv, "Link Down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001494 netif_carrier_off(priv->dev);
1495 } else {
Yevgeny Petriline5cc44b2010-08-24 03:46:01 +00001496 en_info(priv, "Link Up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001497 netif_carrier_on(priv->dev);
1498 }
1499 }
1500 priv->last_link_state = linkstate;
1501 mutex_unlock(&mdev->state_lock);
1502}
1503
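/* Build a per-ring IRQ affinity hint mask, preferring CPUs local to
 * the device's NUMA node so RX completions are handled close to the
 * memory they touch.
 */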
Yuval Atias9e311e72014-06-09 10:24:39 +03001504static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1505{
1506 struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
1507 int numa_node = priv->mdev->dev->numa_node;
1508 int ret = 0;
1509
1510 if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
1511 return -ENOMEM;
1512
1513 ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
1514 ring->affinity_mask);
1515 if (ret)
1516 free_cpumask_var(ring->affinity_mask);
1517
1518 return ret;
1519}
1520
1521static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
1522{
1523 free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
1524}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001525
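/* Bring the port up: activate the RX rings and CQs, reserve the
 * ethernet QP, configure RSS steering and the drop QP, activate the TX
 * rings and CQs, program the port (MTU, pause/PPP, default QP, VXLAN
 * offload), run INIT_PORT, attach the broadcast address and finally
 * start the TX queues. Each step unwinds through the error labels
 * below on failure.
 */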
Yevgeny Petrilin18cc42a2008-12-29 18:39:20 -08001526int mlx4_en_start_port(struct net_device *dev)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001527{
1528 struct mlx4_en_priv *priv = netdev_priv(dev);
1529 struct mlx4_en_dev *mdev = priv->mdev;
1530 struct mlx4_en_cq *cq;
1531 struct mlx4_en_tx_ring *tx_ring;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001532 int rx_index = 0;
1533 int tx_index = 0;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001534 int err = 0;
1535 int i;
1536 int j;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001537 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001538
1539 if (priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001540 en_dbg(DRV, priv, "start port called while port already up\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001541 return 0;
1542 }
1543
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001544 INIT_LIST_HEAD(&priv->mc_list);
1545 INIT_LIST_HEAD(&priv->curr_list);
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001546 INIT_LIST_HEAD(&priv->ethtool_list);
1547 memset(&priv->ethtool_rules[0], 0,
1548 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001549
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001550 /* Calculate Rx buf size */
1551 dev->mtu = min(dev->mtu, priv->max_mtu);
1552 mlx4_en_calc_rx_buf(dev);
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001553 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001554
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001555 /* Configure rx cq's and rings */
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001556 err = mlx4_en_activate_rx_rings(priv);
1557 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001558 en_err(priv, "Failed to activate RX rings\n");
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001559 return err;
1560 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001561 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001562 cq = priv->rx_cq[i];
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001563
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001564 mlx4_en_cq_init_lock(cq);
1565
Yuval Atias9e311e72014-06-09 10:24:39 +03001566 err = mlx4_en_init_affinity_hint(priv, i);
1567 if (err) {
1568 en_err(priv, "Failed preparing IRQ affinity hint\n");
1569 goto cq_err;
1570 }
1571
Alexander Guller76532d02011-10-09 05:26:31 +00001572 err = mlx4_en_activate_cq(priv, cq, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001573 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001574 en_err(priv, "Failed activating Rx CQ\n");
Yuval Atias9e311e72014-06-09 10:24:39 +03001575 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilina4233302009-04-26 20:41:34 +00001576 goto cq_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001577 }
Ido Shamayc3f25112014-12-16 13:28:54 +02001578
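/* Mark every CQE as HW-owned so stale ring contents are not
 * mistaken for valid completions before the HW has written them.
 */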
1579 for (j = 0; j < cq->size; j++) {
1580 struct mlx4_cqe *cqe = NULL;
1581
1582 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1583 priv->cqe_factor;
1584 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1585 }
1586
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001587 err = mlx4_en_set_cq_moder(priv, cq);
1588 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001589 en_err(priv, "Failed setting cq moderation parameters\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001590 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001591 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001592 goto cq_err;
1593 }
1594 mlx4_en_arm_cq(priv, cq);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001595 priv->rx_ring[i]->cqn = cq->mcq.cqn;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001596 ++rx_index;
1597 }
1598
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001599 /* Set qp number */
1600 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
Yan Burman16a10ff2013-02-07 02:25:22 +00001601 err = mlx4_en_get_qp(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001602 if (err) {
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001603 en_err(priv, "Failed getting eth qp\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001604 goto cq_err;
1605 }
1606 mdev->mac_removed[priv->port] = 0;
1607
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001608 err = mlx4_en_config_rss_steer(priv);
1609 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001610 en_err(priv, "Failed configuring rss steering\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001611 goto mac_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001612 }
1613
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001614 err = mlx4_en_create_drop_qp(priv);
1615 if (err)
1616 goto rss_err;
1617
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001618 /* Configure tx cq's and rings */
1619 for (i = 0; i < priv->tx_ring_num; i++) {
1620 /* Configure cq */
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001621 cq = priv->tx_cq[i];
Alexander Guller76532d02011-10-09 05:26:31 +00001622 err = mlx4_en_activate_cq(priv, cq, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001623 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001624 en_err(priv, "Failed allocating Tx CQ\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001625 goto tx_err;
1626 }
1627 err = mlx4_en_set_cq_moder(priv, cq);
1628 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07001629 en_err(priv, "Failed setting cq moderation parameters\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001630 mlx4_en_deactivate_cq(priv, cq);
1631 goto tx_err;
1632 }
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001633 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001634 cq->buf->wqe_index = cpu_to_be16(0xffff);
1635
1636 /* Configure ring */
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001637 tx_ring = priv->tx_ring[i];
Amir Vadai0e98b522012-04-04 21:33:24 +00001638 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
Amir Vadaid3179662012-12-02 03:49:23 +00001639 i / priv->num_tx_rings_p_up);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001640 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001641 en_err(priv, "Failed allocating Tx ring\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001642 mlx4_en_deactivate_cq(priv, cq);
1643 goto tx_err;
1644 }
Yevgeny Petrilin5b263f52012-04-23 02:18:50 +00001645 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
Yevgeny Petriline22979d2012-04-23 02:18:39 +00001646
1647 /* Arm CQ for TX completions */
1648 mlx4_en_arm_cq(priv, cq);
1649
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001650 /* Set initial ownership of all Tx TXBBs to SW (1) */
1651 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1652 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
1653 ++tx_index;
1654 }
1655
1656 /* Configure port */
1657 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1658 priv->rx_skb_size + ETH_FCS_LEN,
Yevgeny Petrilind53b93f2008-11-05 04:48:36 +00001659 priv->prof->tx_pause,
1660 priv->prof->tx_ppp,
1661 priv->prof->rx_pause,
1662 priv->prof->rx_ppp);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001663 if (err) {
Yan Burman48e551f2013-02-07 02:25:21 +00001664 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1665 priv->port, err);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001666 goto tx_err;
1667 }
1668 /* Set default qp number */
1669 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1670 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001671 en_err(priv, "Failed setting default qp numbers\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001672 goto tx_err;
1673 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001674
Or Gerlitz837052d2013-12-23 16:09:44 +02001675 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02001676 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
Or Gerlitz837052d2013-12-23 16:09:44 +02001677 if (err) {
1678 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1679 err);
1680 goto tx_err;
1681 }
1682 }
1683
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001684 /* Init port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001685 en_dbg(HW, priv, "Initializing port\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001686 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1687 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001688 en_err(priv, "Failed Initializing port\n");
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001689 goto tx_err;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001690 }
1691
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001692 /* Attach RX QP to broadcast address */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001693 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001694 mc_list[5] = priv->port; /* needed for B0 steering support */
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001695 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001696 priv->port, 0, MLX4_PROT_ETH,
1697 &priv->broadcast_id))
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001698 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1699
Herbert Xub5845f92011-03-27 01:01:26 +00001700 /* Must redo promiscuous mode setup. */
1701 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1702
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001703 /* Schedule multicast task to populate multicast list */
Yan Burman0eb74fd2013-02-07 02:25:23 +00001704 queue_work(mdev->workqueue, &priv->rx_mode_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001705
Or Gerlitza66132f2014-04-01 11:27:13 +03001706#ifdef CONFIG_MLX4_EN_VXLAN
Or Gerlitz9737c6a2014-11-18 17:51:27 +02001707 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02001708 vxlan_get_rx_port(dev);
Or Gerlitza66132f2014-04-01 11:27:13 +03001709#endif
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001710 priv->port_up = true;
Yevgeny Petrilina11faac2009-06-20 22:15:46 +00001711 netif_tx_start_all_queues(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001712 netif_device_attach(dev);
1713
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001714 return 0;
1715
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001716tx_err:
1717 while (tx_index--) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001718 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
1719 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001720 }
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001721 mlx4_en_destroy_drop_qp(priv);
1722rss_err:
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001723 mlx4_en_release_rss_steer(priv);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001724mac_err:
Yan Burman16a10ff2013-02-07 02:25:22 +00001725 mlx4_en_put_qp(priv);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001726cq_err:
Yuval Atias9e311e72014-06-09 10:24:39 +03001727 while (rx_index--) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001728 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
Yuval Atias9e311e72014-06-09 10:24:39 +03001729 mlx4_en_free_affinity_hint(priv, rx_index);
1730 }
Yevgeny Petrilin38aab072009-05-24 03:17:11 +00001731 for (i = 0; i < priv->rx_ring_num; i++)
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001732 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001733
1734 return err; /* need to close devices */
1735}
1736
1737
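/* Tear the port down in roughly the reverse order of
 * mlx4_en_start_port(): close the port, stop the TX queues, drop
 * promiscuous state and multicast attachments, flush steering rules,
 * then drain and deactivate the TX and RX rings and CQs. With @detach
 * set the netdev is also detached so the stack stops using it (used by
 * the watchdog restart path).
 */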
Amir Vadai3484aac2013-01-30 23:07:11 +00001738void mlx4_en_stop_port(struct net_device *dev, int detach)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001739{
1740 struct mlx4_en_priv *priv = netdev_priv(dev);
1741 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001742 struct mlx4_en_mc_list *mclist, *tmp;
Hadar Hen Zion0d256c02013-01-30 23:07:08 +00001743 struct ethtool_flow_id *flow, *tmp_flow;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001744 int i;
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001745 u8 mc_list[16] = {0};
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001746
1747 if (!priv->port_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001748 en_dbg(DRV, priv, "stop port called while port already down\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001749 return;
1750 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001751
Eugenia Emantayev0cc5c8b2013-06-25 12:09:33 +03001752 /* Close port */
1753 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1754
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001755 /* Synchronize with tx routine */
1756 netif_tx_lock_bh(dev);
Amir Vadai3484aac2013-01-30 23:07:11 +00001757 if (detach)
1758 netif_device_detach(dev);
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001759 netif_tx_stop_all_queues(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001760 netif_tx_unlock_bh(dev);
1761
Amir Vadai3484aac2013-01-30 23:07:11 +00001762 netif_tx_disable(dev);
1763
Yevgeny Petrilin7c287382010-08-24 03:45:45 +00001764 /* Set port as not active */
Yevgeny Petrilin3c05f5e2009-06-20 22:15:52 +00001765 priv->port_up = false;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001766
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001767 /* Promiscuous mode */
1768 if (mdev->dev->caps.steering_mode ==
1769 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1770 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1771 MLX4_EN_FLAG_MC_PROMISC);
1772 mlx4_flow_steer_promisc_remove(mdev->dev,
1773 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001774 MLX4_FS_ALL_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001775 mlx4_flow_steer_promisc_remove(mdev->dev,
1776 priv->port,
Hadar Hen Zionf9162532013-04-24 13:58:45 +00001777 MLX4_FS_MC_DEFAULT);
Aviad Yehezkeldb0e7cb2013-01-24 01:54:15 +00001778 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1779 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1780
1781 /* Disable promiscuous mode */
1782 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1783 priv->port);
1784
1785 /* Disable Multicast promisc */
1786 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1787 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1788 priv->port);
1789 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1790 }
1791 }
1792
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001793 /* Detach all multicasts */
Joe Perchesc7bf7162015-03-02 19:54:47 -08001794 eth_broadcast_addr(&mc_list[10]);
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001795 mc_list[5] = priv->port; /* needed for B0 steering support */
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001796 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001797 MLX4_PROT_ETH, priv->broadcast_id);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001798 list_for_each_entry(mclist, &priv->curr_list, list) {
1799 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001800 mc_list[5] = priv->port;
1801 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00001802 mc_list, MLX4_PROT_ETH, mclist->reg_id);
Or Gerlitzde123262014-03-13 14:52:15 +02001803 if (mclist->tunnel_reg_id)
1804 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001805 }
1806 mlx4_en_clear_list(dev);
Yevgeny Petrilin6d199932012-07-05 04:03:43 +00001807 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1808 list_del(&mclist->list);
1809 kfree(mclist);
1810 }
1811
Yevgeny Petrilin16792002011-03-22 22:38:31 +00001812 /* Flush multicast filter */
1813 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1814
Hadar Hen Zion6efb5fa2013-03-21 05:55:53 +00001815 /* Remove flow steering rules for the port */
1816 if (mdev->dev->caps.steering_mode ==
1817 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1818 ASSERT_RTNL();
1819 list_for_each_entry_safe(flow, tmp_flow,
1820 &priv->ethtool_list, list) {
1821 mlx4_flow_detach(mdev->dev, flow->id);
1822 list_del(&flow->list);
1823 }
1824 }
1825
Hadar Hen Zioncabdc8ee2012-07-05 04:03:50 +00001826 mlx4_en_destroy_drop_qp(priv);
1827
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001828 /* Free TX Rings */
1829 for (i = 0; i < priv->tx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001830 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
1831 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001832 }
1833 msleep(10);
1834
1835 for (i = 0; i < priv->tx_ring_num; i++)
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001836 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001837
1838 /* Free RSS qps */
1839 mlx4_en_release_rss_steer(priv);
1840
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001841 /* Unregister MAC address for the port */
Yan Burman16a10ff2013-02-07 02:25:22 +00001842 mlx4_en_put_qp(priv);
Or Gerlitz5930e8d2013-10-15 16:55:22 +02001843 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
Matan Barak955154f2013-01-30 23:07:10 +00001844 mdev->mac_removed[priv->port] = 1;
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00001845
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001846 /* Free RX Rings */
1847 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001848 struct mlx4_en_cq *cq = priv->rx_cq[i];
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001849
1850 local_bh_disable();
1851 while (!mlx4_en_cq_lock_napi(cq)) {
1852 pr_info("CQ %d locked\n", i);
1853 mdelay(1);
1854 }
1855 local_bh_enable();
1856
Ido Shamayf4a36752014-10-27 11:37:45 +02001857 napi_synchronize(&cq->napi);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001858 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
Amir Vadai9e77a2b2013-06-18 16:18:27 +03001859 mlx4_en_deactivate_cq(priv, cq);
Yuval Atias9e311e72014-06-09 10:24:39 +03001860
1861 mlx4_en_free_affinity_hint(priv, i);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001862 }
1863}
1864
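/* Watchdog worker: restart the port (stop followed by start) after a
 * TX timeout or a failed reconfiguration.
 */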
1865static void mlx4_en_restart(struct work_struct *work)
1866{
1867 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
1868 watchdog_task);
1869 struct mlx4_en_dev *mdev = priv->mdev;
1870 struct net_device *dev = priv->dev;
1871
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001872 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001873
1874 mutex_lock(&mdev->state_lock);
1875 if (priv->port_up) {
Amir Vadai3484aac2013-01-30 23:07:11 +00001876 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001877 if (mlx4_en_start_port(dev))
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001878 en_err(priv, "Failed restarting port %d\n", priv->port);
Yevgeny Petrilin1e338db2009-04-20 04:26:05 +00001879 }
1880 mutex_unlock(&mdev->state_lock);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001881}
1882
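/* Reset both the HW port counters (a DUMP with the reset flag set) and
 * the SW per-ring byte/packet/checksum counters.
 */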
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001883static void mlx4_en_clear_stats(struct net_device *dev)
1884{
1885 struct mlx4_en_priv *priv = netdev_priv(dev);
1886 struct mlx4_en_dev *mdev = priv->mdev;
1887 int i;
1888
1889 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
1890 en_dbg(HW, priv, "Failed dumping statistics\n");
1891
1892 memset(&priv->stats, 0, sizeof(priv->stats));
1893 memset(&priv->pstats, 0, sizeof(priv->pstats));
1894 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
1895 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
1896
1897 for (i = 0; i < priv->tx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001898 priv->tx_ring[i]->bytes = 0;
1899 priv->tx_ring[i]->packets = 0;
1900 priv->tx_ring[i]->tx_csum = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001901 }
1902 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001903 priv->rx_ring[i]->bytes = 0;
1904 priv->rx_ring[i]->packets = 0;
1905 priv->rx_ring[i]->csum_ok = 0;
1906 priv->rx_ring[i]->csum_none = 0;
Shani Michaelif8c64552014-11-09 13:51:53 +02001907 priv->rx_ring[i]->csum_complete = 0;
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001908 }
1909}
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001910
1911static int mlx4_en_open(struct net_device *dev)
1912{
1913 struct mlx4_en_priv *priv = netdev_priv(dev);
1914 struct mlx4_en_dev *mdev = priv->mdev;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001915 int err = 0;
1916
1917 mutex_lock(&mdev->state_lock);
1918
1919 if (!mdev->device_up) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001920 en_err(priv, "Cannot open - device down/disabled\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001921 err = -EBUSY;
1922 goto out;
1923 }
1924
Eugenia Emantayevb477ba62012-01-19 09:42:37 +00001925 /* Reset HW statistics and SW counters */
1926 mlx4_en_clear_stats(dev);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001927
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001928 err = mlx4_en_start_port(dev);
1929 if (err)
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001930 en_err(priv, "Failed starting port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001931
1932out:
1933 mutex_unlock(&mdev->state_lock);
1934 return err;
1935}
1936
1937
1938static int mlx4_en_close(struct net_device *dev)
1939{
1940 struct mlx4_en_priv *priv = netdev_priv(dev);
1941 struct mlx4_en_dev *mdev = priv->mdev;
1942
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00001943 en_dbg(IFDOWN, priv, "Close port called\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001944
1945 mutex_lock(&mdev->state_lock);
1946
Amir Vadai3484aac2013-01-30 23:07:11 +00001947 mlx4_en_stop_port(dev, 0);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001948 netif_carrier_off(dev);
1949
1950 mutex_unlock(&mdev->state_lock);
1951 return 0;
1952}
1953
Alexander Gullerfe0af032011-10-09 05:26:46 +00001954void mlx4_en_free_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001955{
1956 int i;
1957
Amir Vadai1eb8c692012-07-18 22:33:52 +00001958#ifdef CONFIG_RFS_ACCEL
1959 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
1960 priv->dev->rx_cpu_rmap = NULL;
1961#endif
1962
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001963 for (i = 0; i < priv->tx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001964 if (priv->tx_ring && priv->tx_ring[i])
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001965 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001966 if (priv->tx_cq && priv->tx_cq[i])
Alexander Gullerfe0af032011-10-09 05:26:46 +00001967 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001968 }
1969
1970 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001971 if (priv->rx_ring[i])
Thadeu Lima de Souza Cascardo68355f72012-02-06 08:39:49 +00001972 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
1973 priv->prof->rx_ring_size, priv->stride);
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02001974 if (priv->rx_cq[i])
Alexander Gullerfe0af032011-10-09 05:26:46 +00001975 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001976 }
Yevgeny Petrilin044ca2a2012-06-25 00:24:13 +00001977
1978 if (priv->base_tx_qpn) {
1979 mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
1980 priv->base_tx_qpn = 0;
1981 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001982}
1983
Yevgeny Petrilin18cc42a2008-12-29 18:39:20 -08001984int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001985{
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001986 struct mlx4_en_port_profile *prof = priv->prof;
1987 int i;
Eugenia Emantayev163561a2013-11-07 12:19:54 +02001988 int node;
Yevgeny Petrilin87a5c382011-03-22 22:38:52 +00001989
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001990 /* Create tx Rings */
1991 for (i = 0; i < priv->tx_ring_num; i++) {
Eugenia Emantayev163561a2013-11-07 12:19:54 +02001992 node = cpu_to_node(i % num_online_cpus());
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001993 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02001994 prof->tx_ring_size, i, TX, node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07001995 goto err;
1996
Ido Shamayd03a68f2013-12-19 21:20:14 +02001997 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
Ido Shamayd03a68f2013-12-19 21:20:14 +02001998 prof->tx_ring_size, TXBB_SIZE,
1999 node, i))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002000 goto err;
2001 }
2002
2003 /* Create rx Rings */
2004 for (i = 0; i < priv->rx_ring_num; i++) {
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002005 node = cpu_to_node(i % num_online_cpus());
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002006 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002007 prof->rx_ring_size, i, RX, node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002008 goto err;
2009
2010 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
Eugenia Emantayev163561a2013-11-07 12:19:54 +02002011 prof->rx_ring_size, priv->stride,
2012 node))
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002013 goto err;
2014 }
2015
Amir Vadai1eb8c692012-07-18 22:33:52 +00002016#ifdef CONFIG_RFS_ACCEL
Amir Vadaia229e482013-03-07 03:46:57 +00002017 if (priv->mdev->dev->caps.comp_pool) {
2018 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
2019 if (!priv->dev->rx_cpu_rmap)
2020 goto err;
2021 }
Amir Vadai1eb8c692012-07-18 22:33:52 +00002022#endif
2023
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002024 return 0;
2025
2026err:
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002027 en_err(priv, "Failed to allocate NIC resources\n");
Eugenia Emantayev41d942d2013-11-07 12:19:52 +02002028 for (i = 0; i < priv->rx_ring_num; i++) {
2029 if (priv->rx_ring[i])
2030 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2031 prof->rx_ring_size,
2032 priv->stride);
2033 if (priv->rx_cq[i])
2034 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2035 }
2036 for (i = 0; i < priv->tx_ring_num; i++) {
2037 if (priv->tx_ring[i])
2038 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
2039 if (priv->tx_cq[i])
2040 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
2041 }
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002042 return -ENOMEM;
2043}
2044
2045
2046void mlx4_en_destroy_netdev(struct net_device *dev)
2047{
2048 struct mlx4_en_priv *priv = netdev_priv(dev);
2049 struct mlx4_en_dev *mdev = priv->mdev;
2050
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002051 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002052
2053 /* Unregister device - this will close the port if it was up */
2054 if (priv->registered)
2055 unregister_netdev(dev);
2056
2057 if (priv->allocated)
2058 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2059
2060 cancel_delayed_work(&priv->stats_task);
Amir Vadaib6c39bf2013-04-23 06:06:51 +00002061 cancel_delayed_work(&priv->service_task);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002062 /* flush any pending task for this netdev */
2063 flush_workqueue(mdev->workqueue);
2064
2065 /* Detach the netdev so tasks would not attempt to access it */
2066 mutex_lock(&mdev->state_lock);
2067 mdev->pndev[priv->port] = NULL;
Moni Shoua5da03542015-02-03 16:48:34 +02002068 mdev->upper[priv->port] = NULL;
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002069 mutex_unlock(&mdev->state_lock);
2070
Alexander Gullerfe0af032011-10-09 05:26:46 +00002071 mlx4_en_free_resources(priv);
Amir Vadai564c2742012-04-04 21:33:26 +00002072
Amir Vadaibc6a4742012-05-17 00:58:10 +00002073 kfree(priv->tx_ring);
2074 kfree(priv->tx_cq);
2075
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002076 free_netdev(dev);
2077}
2078
2079static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2080{
2081 struct mlx4_en_priv *priv = netdev_priv(dev);
2082 struct mlx4_en_dev *mdev = priv->mdev;
2083 int err = 0;
2084
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002085 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002086 dev->mtu, new_mtu);
2087
2088 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002089 en_err(priv, "Bad MTU size:%d\n", new_mtu);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002090 return -EPERM;
2091 }
2092 dev->mtu = new_mtu;
2093
2094 if (netif_running(dev)) {
2095 mutex_lock(&mdev->state_lock);
2096 if (!mdev->device_up) {
2097 /* NIC is probably restarting - let watchdog task reset
2098 * the port */
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002099 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002100 } else {
Amir Vadai3484aac2013-01-30 23:07:11 +00002101 mlx4_en_stop_port(dev, 1);
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002102 err = mlx4_en_start_port(dev);
2103 if (err) {
Yevgeny Petrilin453a6082009-06-01 20:27:13 +00002104 en_err(priv, "Failed restarting port:%d\n",
Yevgeny Petrilinc27a02c2008-10-22 15:47:49 -07002105 priv->port);
2106 queue_work(mdev->workqueue, &priv->watchdog_task);
2107 }
2108 }
2109 mutex_unlock(&mdev->state_lock);
2110 }
2111 return 0;
2112}
2113
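/* SIOCSHWTSTAMP handler: validate the requested TX/RX timestamping
 * modes against device capabilities (any supported PTP RX filter is
 * widened to HWTSTAMP_FILTER_ALL) and apply them through
 * mlx4_en_reset_config().
 */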
Ben Hutchings100dbda2013-11-18 23:13:31 +00002114static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
Amir Vadaiec693d42013-04-23 06:06:49 +00002115{
2116 struct mlx4_en_priv *priv = netdev_priv(dev);
2117 struct mlx4_en_dev *mdev = priv->mdev;
2118 struct hwtstamp_config config;
2119
2120 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2121 return -EFAULT;
2122
2123 /* reserved for future extensions */
2124 if (config.flags)
2125 return -EINVAL;
2126
2127 /* device doesn't support time stamping */
2128 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2129 return -EINVAL;
2130
2131 /* TX HW timestamp */
2132 switch (config.tx_type) {
2133 case HWTSTAMP_TX_OFF:
2134 case HWTSTAMP_TX_ON:
2135 break;
2136 default:
2137 return -ERANGE;
2138 }
2139
2140 /* RX HW timestamp */
2141 switch (config.rx_filter) {
2142 case HWTSTAMP_FILTER_NONE:
2143 break;
2144 case HWTSTAMP_FILTER_ALL:
2145 case HWTSTAMP_FILTER_SOME:
2146 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2147 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2148 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2149 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2150 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2151 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2152 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2153 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2154 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2155 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2156 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2157 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2158 config.rx_filter = HWTSTAMP_FILTER_ALL;
2159 break;
2160 default:
2161 return -ERANGE;
2162 }
2163
Saeed Mahameed7787fa62014-10-27 11:37:42 +02002164 if (mlx4_en_reset_config(dev, config, dev->features)) {
Amir Vadaiec693d42013-04-23 06:06:49 +00002165 config.tx_type = HWTSTAMP_TX_OFF;
2166 config.rx_filter = HWTSTAMP_FILTER_NONE;
2167 }
2168
2169 return copy_to_user(ifr->ifr_data, &config,
2170 sizeof(config)) ? -EFAULT : 0;
2171}
2172
Ben Hutchings100dbda2013-11-18 23:13:31 +00002173static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2174{
2175 struct mlx4_en_priv *priv = netdev_priv(dev);
2176
2177 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2178 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2179}
2180
Amir Vadaiec693d42013-04-23 06:06:49 +00002181static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2182{
2183 switch (cmd) {
2184 case SIOCSHWTSTAMP:
Ben Hutchings100dbda2013-11-18 23:13:31 +00002185 return mlx4_en_hwtstamp_set(dev, ifr);
2186 case SIOCGHWTSTAMP:
2187 return mlx4_en_hwtstamp_get(dev, ifr);
Amir Vadaiec693d42013-04-23 06:06:49 +00002188 default:
2189 return -EOPNOTSUPP;
2190 }
2191}
2192
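/* .ndo_set_features handler: toggling RX VLAN stripping requires a
 * port reconfiguration, while loopback is flipped by patching the TX
 * WQE control flags and updating the loopback state.
 */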
Amir Vadai60d6fe92011-11-26 19:55:19 +00002193static int mlx4_en_set_features(struct net_device *netdev,
2194 netdev_features_t features)
2195{
2196 struct mlx4_en_priv *priv = netdev_priv(netdev);
Saeed Mahameed537f6f92014-10-27 11:37:43 +02002197 int ret = 0;
2198
2199 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2200 en_info(priv, "Turn %s RX vlan strip offload\n",
2201 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
2202 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2203 features);
2204 if (ret)
2205 return ret;
2206 }
Amir Vadai60d6fe92011-11-26 19:55:19 +00002207
Ido Shamaycfb53f32015-02-03 17:57:21 +02002208 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2209 en_info(priv, "Turn %s TX vlan strip offload\n",
2210 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2211
Amir Vadai60d6fe92011-11-26 19:55:19 +00002212 if (features & NETIF_F_LOOPBACK)
2213 priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
2214 else
2215 priv->ctrl_flags &=
2216 cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
2217
Yan Burman79aeacc2013-02-07 02:25:19 +00002218 mlx4_en_update_loopback_state(netdev, features);
2219
Amir Vadai60d6fe92011-11-26 19:55:19 +00002220 return 0;
2221
2222}
2223
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002224static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2225{
2226 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2227 struct mlx4_en_dev *mdev = en_priv->mdev;
Eugenia Emantayev98133372014-03-02 10:25:01 +02002228 u64 mac_u64 = mlx4_mac_to_u64(mac);
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002229
2230 if (!is_valid_ether_addr(mac))
2231 return -EINVAL;
2232
2233 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
2234}
2235
Rony Efraim3f7fb022013-04-25 05:22:28 +00002236static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2237{
2238 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2239 struct mlx4_en_dev *mdev = en_priv->mdev;
2240
2241 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
2242}
2243
Rony Efraime6b6a232013-04-25 05:22:29 +00002244static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2245{
2246 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2247 struct mlx4_en_dev *mdev = en_priv->mdev;
2248
2249 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2250}
2251
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002252static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2253{
2254 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2255 struct mlx4_en_dev *mdev = en_priv->mdev;
2256
2257 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2258}
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002259
Rony Efraim948e3062013-06-13 13:19:11 +03002260static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2261{
2262 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2263 struct mlx4_en_dev *mdev = en_priv->mdev;
2264
2265 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2266}
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002267
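/* Report the firmware-assigned 64-bit physical port id as a
 * big-endian byte array through .ndo_get_phys_port_id.
 */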
2268#define PORT_ID_BYTE_LEN 8
2269static int mlx4_en_get_phys_port_id(struct net_device *dev,
Jiri Pirko02637fc2014-11-28 14:34:16 +01002270 struct netdev_phys_item_id *ppid)
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002271{
2272 struct mlx4_en_priv *priv = netdev_priv(dev);
2273 struct mlx4_dev *mdev = priv->mdev->dev;
2274 int i;
2275 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2276
2277 if (!phys_port_id)
2278 return -EOPNOTSUPP;
2279
2280 ppid->id_len = sizeof(phys_port_id);
2281 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2282 ppid->id[i] = phys_port_id & 0xff;
2283 phys_port_id >>= 8;
2284 }
2285 return 0;
2286}
2287
Or Gerlitza66132f2014-04-01 11:27:13 +03002288#ifdef CONFIG_MLX4_EN_VXLAN
Or Gerlitz1b136de2014-03-27 14:02:04 +02002289static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2290{
2291 int ret;
2292 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2293 vxlan_add_task);
2294
2295 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2296 if (ret)
2297 goto out;
2298
2299 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2300 VXLAN_STEER_BY_OUTER_MAC, 1);
2301out:
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002302 if (ret) {
Or Gerlitz1b136de2014-03-27 14:02:04 +02002303 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002304 return;
2305 }
2306
2307 /* set offloads */
2308 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2309 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2310 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2311 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002312}
2313
2314static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2315{
2316 int ret;
2317 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2318 vxlan_del_task);
Or Gerlitzf4a1edd2014-11-09 14:25:39 +02002319 /* unset offloads */
2320 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2321 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2322 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2323 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
Or Gerlitz1b136de2014-03-27 14:02:04 +02002324
2325 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2326 VXLAN_STEER_BY_OUTER_MAC, 0);
2327 if (ret)
2328 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2329
2330 priv->vxlan_port = 0;
2331}
2332
2333static void mlx4_en_add_vxlan_port(struct net_device *dev,
2334 sa_family_t sa_family, __be16 port)
2335{
2336 struct mlx4_en_priv *priv = netdev_priv(dev);
2337 __be16 current_port;
2338
Or Gerlitze326f2f2014-07-02 17:36:23 +03002339 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
Or Gerlitz1b136de2014-03-27 14:02:04 +02002340 return;
2341
2342 if (sa_family == AF_INET6)
2343 return;
2344
2345 current_port = priv->vxlan_port;
2346 if (current_port && current_port != port) {
2347 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2348 ntohs(current_port), ntohs(port));
2349 return;
2350 }
2351
2352 priv->vxlan_port = port;
2353 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2354}
2355
2356static void mlx4_en_del_vxlan_port(struct net_device *dev,
2357 sa_family_t sa_family, __be16 port)
2358{
2359 struct mlx4_en_priv *priv = netdev_priv(dev);
2360 __be16 current_port;
2361
2362 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2363 return;
2364
2365 if (sa_family == AF_INET6)
2366 return;
2367
2368 current_port = priv->vxlan_port;
2369 if (current_port != port) {
2370 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2371 return;
2372 }
2373
2374 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2375}
Joe Stringer956bdab2014-11-13 16:38:14 -08002376
Jesse Gross5f352272014-12-23 22:37:26 -08002377static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2378 struct net_device *dev,
2379 netdev_features_t features)
Joe Stringer956bdab2014-11-13 16:38:14 -08002380{
Toshiaki Makita8cb65d02015-03-27 14:31:12 +09002381 features = vlan_features_check(skb, features);
Jesse Gross5f352272014-12-23 22:37:26 -08002382 return vxlan_features_check(skb, features);
Joe Stringer956bdab2014-11-13 16:38:14 -08002383}
Or Gerlitza66132f2014-04-01 11:27:13 +03002384#endif
Or Gerlitz1b136de2014-03-27 14:02:04 +02002385
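/* Per-queue TX rate limiting for .ndo_set_tx_maxrate: the HW rate
 * value field is 12 bits wide, so a rate that does not fit is
 * programmed in Gb/s units instead of Mb/s (e.g. 40000 Mb/s is
 * programmed as 40 Gb/s); zero removes the limit.
 */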
Wu Fengguangde1cf8a2015-03-19 08:51:27 +08002386static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002387{
2388 struct mlx4_en_priv *priv = netdev_priv(dev);
2389 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
2390 struct mlx4_update_qp_params params;
2391 int err;
2392
2393 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2394 return -EOPNOTSUPP;
2395
2396 /* The rate is given in Mb/s; if it does not fit into 12 bits, program it in Gb/s units instead */
2397 if (maxrate >> 12) {
2398 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2399 params.rate_val = maxrate / 1000;
2400 } else if (maxrate) {
2401 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2402 params.rate_val = maxrate;
2403 } else { /* zero serves to revoke the QP rate-limitation */
2404 params.rate_unit = 0;
2405 params.rate_val = 0;
2406 }
2407
2408 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2409 &params);
2410 return err;
2411}
2412
Stephen Hemminger3addc562008-11-21 17:30:58 -08002413static const struct net_device_ops mlx4_netdev_ops = {
2414 .ndo_open = mlx4_en_open,
2415 .ndo_stop = mlx4_en_close,
2416 .ndo_start_xmit = mlx4_en_xmit,
Yevgeny Petrilinf813cad2009-06-01 23:24:07 +00002417 .ndo_select_queue = mlx4_en_select_queue,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002418 .ndo_get_stats = mlx4_en_get_stats,
Yan Burman0eb74fd2013-02-07 02:25:23 +00002419 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002420 .ndo_set_mac_address = mlx4_en_set_mac,
Stephen Hemminger52255bb2009-01-09 10:45:37 +00002421 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002422 .ndo_change_mtu = mlx4_en_change_mtu,
Amir Vadaiec693d42013-04-23 06:06:49 +00002423 .ndo_do_ioctl = mlx4_en_ioctl,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002424 .ndo_tx_timeout = mlx4_en_tx_timeout,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002425 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2426 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2427#ifdef CONFIG_NET_POLL_CONTROLLER
2428 .ndo_poll_controller = mlx4_en_netpoll,
2429#endif
Amir Vadai60d6fe92011-11-26 19:55:19 +00002430 .ndo_set_features = mlx4_en_set_features,
Amir Vadai897d7842012-04-04 21:33:27 +00002431 .ndo_setup_tc = mlx4_en_setup_tc,
Amir Vadai1eb8c692012-07-18 22:33:52 +00002432#ifdef CONFIG_RFS_ACCEL
2433 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2434#endif
Cong Wange0d10952013-08-01 11:10:25 +08002435#ifdef CONFIG_NET_RX_BUSY_POLL
Eliezer Tamir8b80cda2013-07-10 17:13:26 +03002436 .ndo_busy_poll = mlx4_en_low_latency_recv,
Amir Vadai9e77a2b2013-06-18 16:18:27 +03002437#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002438 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Or Gerlitza66132f2014-04-01 11:27:13 +03002439#ifdef CONFIG_MLX4_EN_VXLAN
Or Gerlitz1b136de2014-03-27 14:02:04 +02002440 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2441 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002442 .ndo_features_check = mlx4_en_features_check,
Or Gerlitza66132f2014-04-01 11:27:13 +03002443#endif
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002444 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Stephen Hemminger3addc562008-11-21 17:30:58 -08002445};
2446
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002447static const struct net_device_ops mlx4_netdev_ops_master = {
2448 .ndo_open = mlx4_en_open,
2449 .ndo_stop = mlx4_en_close,
2450 .ndo_start_xmit = mlx4_en_xmit,
2451 .ndo_select_queue = mlx4_en_select_queue,
2452 .ndo_get_stats = mlx4_en_get_stats,
2453 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2454 .ndo_set_mac_address = mlx4_en_set_mac,
2455 .ndo_validate_addr = eth_validate_addr,
2456 .ndo_change_mtu = mlx4_en_change_mtu,
2457 .ndo_tx_timeout = mlx4_en_tx_timeout,
2458 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2459 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2460 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
Rony Efraim3f7fb022013-04-25 05:22:28 +00002461 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
Rony Efraime6b6a232013-04-25 05:22:29 +00002462 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
Rony Efraim948e3062013-06-13 13:19:11 +03002463 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
Rony Efraim2cccb9e2013-04-25 05:22:30 +00002464 .ndo_get_vf_config = mlx4_en_get_vf_config,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002465#ifdef CONFIG_NET_POLL_CONTROLLER
2466 .ndo_poll_controller = mlx4_en_netpoll,
2467#endif
2468 .ndo_set_features = mlx4_en_set_features,
2469 .ndo_setup_tc = mlx4_en_setup_tc,
2470#ifdef CONFIG_RFS_ACCEL
2471 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2472#endif
Hadar Hen Zion84c86402013-12-19 21:20:13 +02002473 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
Or Gerlitz9737c6a2014-11-18 17:51:27 +02002474#ifdef CONFIG_MLX4_EN_VXLAN
2475 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2476 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08002477 .ndo_features_check = mlx4_en_features_check,
Or Gerlitz9737c6a2014-11-18 17:51:27 +02002478#endif
Or Gerlitzc10e4fc2015-03-18 14:57:35 +02002479 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
Rony Efraim8f7ba3c2013-04-25 05:22:27 +00002480};
2481
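/* Bonding support: netdev notifier events are translated into a work
 * item that bonds or unbonds the two-port device and programs the
 * virtual-to-physical port map from process context.
 */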
Moni Shoua5da03542015-02-03 16:48:34 +02002482struct mlx4_en_bond {
2483 struct work_struct work;
2484 struct mlx4_en_priv *priv;
2485 int is_bonded;
2486 struct mlx4_port_map port_map;
2487};
2488
2489static void mlx4_en_bond_work(struct work_struct *work)
2490{
2491 struct mlx4_en_bond *bond = container_of(work,
2492 struct mlx4_en_bond,
2493 work);
2494 int err = 0;
2495 struct mlx4_dev *dev = bond->priv->mdev->dev;
2496
2497 if (bond->is_bonded) {
2498 if (!mlx4_is_bonded(dev)) {
2499 err = mlx4_bond(dev);
2500 if (err)
2501 en_err(bond->priv, "Failed to bond device\n");
2502 }
2503 if (!err) {
2504 err = mlx4_port_map_set(dev, &bond->port_map);
2505 if (err)
2506 en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2507 bond->port_map.port1,
2508 bond->port_map.port2,
2509 err);
2510 }
2511 } else if (mlx4_is_bonded(dev)) {
2512 err = mlx4_unbond(dev);
2513 if (err)
2514 en_err(bond->priv, "Failed to unbond device\n");
2515 }
2516 dev_put(bond->priv->dev);
2517 kfree(bond);
2518}
2519
2520static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
2521 u8 v2p_p1, u8 v2p_p2)
2522{
2523 struct mlx4_en_bond *bond = NULL;
2524
2525 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
2526 if (!bond)
2527 return -ENOMEM;
2528
2529 INIT_WORK(&bond->work, mlx4_en_bond_work);
2530 bond->priv = priv;
2531 bond->is_bonded = is_bonded;
2532 bond->port_map.port1 = v2p_p1;
2533 bond->port_map.port2 = v2p_p2;
2534 dev_hold(priv->dev);
2535 queue_work(priv->mdev->workqueue, &bond->work);
2536 return 0;
2537}
2538
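/* Netdev notifier: decide whether both ethernet ports of this device
 * are enslaved to the same bonding master in a supported mode, derive
 * the virtual-to-physical port mapping from the bond/slave state, and
 * queue the bond work accordingly.
 */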
2539int mlx4_en_netdev_event(struct notifier_block *this,
2540 unsigned long event, void *ptr)
2541{
2542 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2543 u8 port = 0;
2544 struct mlx4_en_dev *mdev;
2545 struct mlx4_dev *dev;
2546 int i, num_eth_ports = 0;
2547 bool do_bond = true;
2548 struct mlx4_en_priv *priv;
2549 u8 v2p_port1 = 0;
2550 u8 v2p_port2 = 0;
2551
2552 if (!net_eq(dev_net(ndev), &init_net))
2553 return NOTIFY_DONE;
2554
2555 mdev = container_of(this, struct mlx4_en_dev, nb);
2556 dev = mdev->dev;
2557
2558 /* Go into bonded mode only when two network devices set up on two ports
2559 * of the same mlx4 device are slaves of the same bonding master
2560 */
2561 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
2562 ++num_eth_ports;
2563 if (!port && (mdev->pndev[i] == ndev))
2564 port = i;
2565 mdev->upper[i] = mdev->pndev[i] ?
2566 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
2567 /* condition not met: network device is a slave */
2568 if (!mdev->upper[i])
2569 do_bond = false;
2570 if (num_eth_ports < 2)
2571 continue;
2572 /* condition not met: same master */
2573 if (mdev->upper[i] != mdev->upper[i-1])
2574 do_bond = false;
2575 }
2576 /* condition not met: 2 slaves */
2577 do_bond = (num_eth_ports == 2) ? do_bond : false;
2578
2579 /* handle only events that come with enough info */
2580 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
2581 return NOTIFY_DONE;
2582
2583 priv = netdev_priv(ndev);
2584 if (do_bond) {
2585 struct netdev_notifier_bonding_info *notifier_info = ptr;
2586 struct netdev_bonding_info *bonding_info =
2587 &notifier_info->bonding_info;
2588
2589 /* require bond mode 1 (active-backup), 2 (XOR) or 4 (802.3ad) */
2590 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
2591 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
2592 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
2593 do_bond = false;
2594
2595 /* require exactly 2 slaves */
2596 if (bonding_info->master.num_slaves != 2)
2597 do_bond = false;
2598
2599 /* calc v2p */
2600 if (do_bond) {
2601 if (bonding_info->master.bond_mode ==
2602 BOND_MODE_ACTIVEBACKUP) {
2603 /* in active-backup mode virtual ports are
2604 * mapped to the physical port of the active
2605 * slave */
2606 if (bonding_info->slave.state ==
2607 BOND_STATE_BACKUP) {
2608 if (port == 1) {
2609 v2p_port1 = 2;
2610 v2p_port2 = 2;
2611 } else {
2612 v2p_port1 = 1;
2613 v2p_port2 = 1;
2614 }
2615 } else { /* BOND_STATE_ACTIVE */
2616 if (port == 1) {
2617 v2p_port1 = 1;
2618 v2p_port2 = 1;
2619 } else {
2620 v2p_port1 = 2;
2621 v2p_port2 = 2;
2622 }
2623 }
2624 } else { /* Active-Active */
2625 /* in active-active mode a virtual port is
2626 * mapped to the native physical port if and only
2627 * if the physical port is up */
2628 __s8 link = bonding_info->slave.link;
2629
2630 if (port == 1)
2631 v2p_port2 = 2;
2632 else
2633 v2p_port1 = 1;
2634 if ((link == BOND_LINK_UP) ||
2635 (link == BOND_LINK_FAIL)) {
2636 if (port == 1)
2637 v2p_port1 = 1;
2638 else
2639 v2p_port2 = 2;
2640 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
2641 if (port == 1)
2642 v2p_port1 = 2;
2643 else
2644 v2p_port2 = 1;
2645 }
2646 }
2647 }
2648 }
2649
2650 mlx4_en_queue_bond_work(priv, do_bond,
2651 v2p_port1, v2p_port2);
2652
2653 return NOTIFY_DONE;
2654}
2655
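/* Build the stats bitmap: multi-function devices report the traffic,
 * drop and port counter groups, with the error counters added on the
 * master; single-function devices leave the bitmap clear.
 */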
Eran Ben Elishaffa88f32015-03-30 17:45:22 +03002656void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
2657{
2658 if (!mlx4_is_mfunc(dev)) {
2659 *stats_bitmap = 0;
2660 return;
2661 }
2662
2663 *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
2664 MLX4_STATS_TRAFFIC_DROPS_MASK |
2665 MLX4_STATS_PORT_COUNTERS_MASK);
2666
2667 if (mlx4_is_master(dev))
2668 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
2669}
2670
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;
	u64 mac_u64;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (!dev)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
	dev->dev_port = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_VXLAN
	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#endif
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

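	/* Cache device handles and profile parameters in the private data */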
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));

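	/* The TX ring and CQ pointer arrays are sized for the maximum ring
	 * count; only the first tx_ring_num slots are actually used.
	 */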
	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->cqe_size = mdev->dev->caps.cqe_size;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
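/* Use the full dcbnl ops only when the device can program its ETH
 * scheduler; otherwise fall back to the PFC-only ops.
 */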
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
	    MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
		priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
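	/* A slave function (VF) may see no valid burned MAC: give it a
	 * random one and publish it back into the device caps. On the PF
	 * an invalid burned MAC is a fatal error.
	 */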
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (mlx4_is_slave(priv->mdev->dev)) {
			eth_hw_addr_random(dev);
			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
			mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
			mdev->dev->caps.def_mac[priv->port] = mac_u64;
		} else {
			en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
			       priv->port, dev->dev_addr);
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));

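	/* Per-WQE RX stride: the base descriptor plus one scatter entry for
	 * each possible fragment, rounded up to a power of two.
	 */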
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	dev->ethtool_ops = &mlx4_en_ethtool_ops;

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

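	/* ntuple filters are exposed only with device-managed flow
	 * steering, and not when DMFS runs in the A0 static mode.
	 */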
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	/* Set the default RSS hash function */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}

	mdev->pndev[port] = dev;
	mdev->upper[port] = NULL;

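	/* Report no carrier until the port is brought up and the link
	 * state task has run.
	 */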
	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

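	/* The periodic service task is queued only when the device
	 * supports HW time-stamping.
	 */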
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);

	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}

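/**
 * mlx4_en_reset_config - apply a new time-stamping and/or VLAN RX config
 * @dev: net device to reconfigure
 * @ts_config: wanted hwtstamp configuration
 * @features: wanted netdev features (only NETIF_F_HW_VLAN_CTAG_RX is
 *	      considered here)
 *
 * The RX rings must be rebuilt for these settings to take effect, so the
 * port is stopped and its resources are freed and reallocated. RX VLAN
 * offload is forced off whenever RX time-stamping is enabled, since the
 * two cannot co-exist.
 */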
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX))
		return 0; /* Nothing to change */

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

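	/* Rebuilding the rings requires the port to be down; run the whole
	 * stop/free/realloc/start cycle under the state lock.
	 */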
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
		ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	priv->hwtstamp_config.tx_type = ts_config.tx_type;
	priv->hwtstamp_config.rx_filter = ts_config.rx_filter;

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, update the RX vlan offload
		 * to the latest wanted state
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	/* RX vlan offload and RX time-stamping can't co-exist: regardless
	 * of the caller's choice, turn off RX vlan offload when RX
	 * time-stamping is on.
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	netdev_features_change(dev);
	return err;
}