/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	if (up != MLX4_EN_NUM_UP)
		return -EINVAL;

	return 0;
}

static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

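/* Pack a 6-byte Ethernet MAC address into a u64, most significant byte first */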
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	kfree(priv->mc_addrs);
	priv->mc_addrs = NULL;
	priv->mc_addrs_cnt = 0;
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	char *mc_addrs;
	int mc_addrs_cnt = netdev_mc_count(dev);
	int i;

	mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
	if (!mc_addrs) {
		en_err(priv, "failed to allocate multicast list\n");
		return;
	}
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
	mlx4_en_clear_list(dev);
	priv->mc_addrs = mc_addrs;
	priv->mc_addrs_cnt = mc_addrs_cnt;
}


static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

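/*
 * Worker that pushes the netdev's current promiscuous/allmulti/multicast
 * state down to the hardware filters and multicast steering tables.
 */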
static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring multicast change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode */
			if (!(mdev->dev->caps.flags &
			      MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
							     priv->base_qpn, 1);
			else
				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
							       priv->port);
			if (err)
				en_err(priv, "Failed enabling "
					     "promiscuous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Add the default qp number as multicast promisc */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed entering multicast promisc mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
							  priv->port);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");

		/* Disable multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
							 priv->port);
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		int i;
		/* Disable multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Detach our qp from all the multicast addresses */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, MLX4_PROT_ETH);
		}
		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			mcast_addr =
			      mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, 0, MLX4_PROT_ETH);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
			   "rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

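/*
 * Adaptive RX interrupt moderation: once per sample interval, estimate each
 * ring's packet rate and interpolate a CQ moderation time between
 * rx_usecs_low and rx_usecs_high according to where that rate falls between
 * pkt_rate_low and pkt_rate_high.
 */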
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which moderation matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation "
					     "for cq:%d\n", ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

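/*
 * Bring the port up: activate RX rings and CQs, acquire the port QP, set up
 * RSS steering and TX rings, configure the port in firmware and attach the
 * broadcast address to the RSS indirection QP.
 */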
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_get_eth_qp(mdev->dev, priv->port,
			      priv->mac, &priv->base_qpn);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       max(0, i - MLX4_EN_NUM_TX_RINGS));
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  0, MLX4_PROT_ETH))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}

	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}

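/*
 * Tear the port down: stop the TX queues, detach broadcast and multicast
 * addresses, free TX/RX rings and CQs, return the port QP and close the
 * port in firmware.
 */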
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Detach all multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH);
	for (i = 0; i < priv->mc_addrs_cnt; i++) {
		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH);
	}
	mlx4_en_clear_list(dev);
	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* Close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}

static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}


static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int base_tx_qpn, err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
	return -ENOMEM;
}

void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	return 0;
}

static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
};

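/*
 * Allocate and register the netdev for one physical port: initialize the
 * private state and work items, allocate rings and CQs, set netdev ops and
 * feature flags, then perform the initial port configuration.
 */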
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 prof->tx_ring_num, prof->rx_ring_num);
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		       priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	netdev_set_num_tc(dev, MLX4_EN_NUM_UP);

	/* First 9 rings are for UP 0 */
	netdev_set_tc_queue(dev, 0, MLX4_EN_NUM_TX_RINGS + 1, 0);

	/* Partition Tx queues evenly amongst UP's 1-7 */
	for (i = 1; i < MLX4_EN_NUM_UP; i++)
		netdev_set_tc_queue(dev, i, 1, MLX4_EN_NUM_TX_RINGS + i);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
	}

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    MLX4_EN_MIN_MTU,
				    0, 0, 0, 0);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}