/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

enum {
	MLX5_LAG_FLAG_BONDED = 1 << 0,
};

struct lag_func {
	struct mlx5_core_dev *dev;
	struct net_device    *netdev;
};

/* Used for collection of netdev event info. */
struct lag_tracker {
	enum netdev_lag_tx_type            tx_type;
	struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
	bool                               is_bonded;
};

/* LAG data of a ConnectX card.
 * It serves both its phys functions.
 */
struct mlx5_lag {
	u8                    flags;
	u8                    v2p_map[MLX5_MAX_PORTS];
	struct lag_func       pf[MLX5_MAX_PORTS];
	struct lag_tracker    tracker;
	struct delayed_work   bond_work;
	struct notifier_block nb;
};

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_MUTEX(lag_mutex);

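/* Firmware commands used to manage the LAG object.  The two
 * tx_remap_affinity fields of the lag context select which physical port
 * carries the traffic of each of the card's two functions.
 */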
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)]   = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
	return dev->priv.lag;
}

static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				       struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -1;
}

static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
}

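/* Infer the virtual-to-physical port mapping from the tracked bond state.
 * In active-backup mode both functions follow the active slave: port 1 if
 * its slave has TX enabled, port 2 otherwise.  In hash mode each function
 * keeps its own port unless that port's link is down, in which case it is
 * remapped to the other port.
 */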
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		if (tracker->netdev_state[0].tx_enabled) {
			*port1 = 1;
			*port2 = 1;
		} else {
			*port1 = 2;
			*port2 = 2;
		}
	} else {
		*port1 = 1;
		*port2 = 2;
		if (!tracker->netdev_state[0].link_up)
			*port1 = 2;
		else if (!tracker->netdev_state[1].link_up)
			*port2 = 1;
	}
}

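/* Mark the device as bonded and create the firmware LAG object with the
 * TX affinity mapping inferred from the current tracker state.
 */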
static void mlx5_activate_lag(struct mlx5_lag *ldev,
			      struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	ldev->flags |= MLX5_LAG_FLAG_BONDED;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
				       &ldev->v2p_map[1]);

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
}

static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	ldev->flags &= ~MLX5_LAG_FLAG_BONDED;

	err = mlx5_cmd_destroy_lag(dev0);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to destroy LAG (%d)\n",
			      err);
}

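/* Reconcile the hardware state with the tracked bond state.  Three
 * transitions are handled: the bond was just formed (drop the per-port IB
 * devices, create the LAG, expose a single IB device on top of dev0 and
 * enable RoCE on dev1), the bond is unchanged but the port mapping moved
 * (MODIFY_LAG), or the bond was torn down (the reverse of the first case).
 */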
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
	struct lag_tracker tracker;
	u8 v2p_port1, v2p_port2;
	int i, err;

	if (!dev0 || !dev1)
		return;

	mutex_lock(&lag_mutex);
	tracker = ldev->tracker;
	mutex_unlock(&lag_mutex);

	if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
		for (i = 0; i < MLX5_MAX_PORTS; i++)
			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
						    MLX5_INTERFACE_PROTOCOL_IB);

		mlx5_activate_lag(ldev, &tracker);

		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_nic_vport_enable_roce(dev1);
	} else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
					       &v2p_port2);

		if ((v2p_port1 != ldev->v2p_map[0]) ||
		    (v2p_port2 != ldev->v2p_map[1])) {
			ldev->v2p_map[0] = v2p_port1;
			ldev->v2p_map[1] = v2p_port2;

			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
			if (err)
				mlx5_core_err(dev0,
					      "Failed to modify LAG (%d)\n",
					      err);
		}
	} else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_nic_vport_disable_roce(dev1);

		mlx5_deactivate_lag(ldev);

		for (i = 0; i < MLX5_MAX_PORTS; i++)
			if (ldev->pf[i].dev)
				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
							 MLX5_INTERFACE_PROTOCOL_IB);
	}
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	schedule_delayed_work(&ldev->bond_work, delay);
}

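/* mlx5_do_bond() must run under mlx5_intf_mutex.  The mutex is taken with
 * trylock so the work item does not sleep on it; if it is contended, the
 * work is simply requeued with a one second delay.
 */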
static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mutex_trylock(&mlx5_intf_mutex);
	if (!status) {
		/* 1 sec delay. */
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mutex_unlock(&mlx5_intf_mutex);
}

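/* Update the tracker from a CHANGEUPPER event.  Returns 1 if the bonding
 * state seen by the tracker changed (and the bond work should run),
 * 0 otherwise.
 */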
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info;
	bool is_bonded;
	int bond_status = 0;
	int num_slaves = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx > -1)
			bond_status |= (1 << idx);

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info)
		tracker->tx_type = lag_upper_info->tx_type;

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 * Lag mode must be activebackup or hash.
	 */
	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
		    (bond_status == 0x3) &&
		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));

	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		return 1;
	}

	return 0;
}

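/* Record the lower state (link up, TX enabled) reported for one of our
 * slave netdevs.  Returns 1 if the tracker was updated, 0 if the event is
 * not relevant to this lag device.
 */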
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx == -1)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

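/* Netdevice notifier callback: fold the event into a local copy of the
 * tracker, publish it under lag_mutex, and kick the bond work if anything
 * changed.
 */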
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
		return NOTIFY_DONE;

	ldev    = container_of(this, struct mlx5_lag, nb);
	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	}

	mutex_lock(&lag_mutex);
	ldev->tracker = tracker;
	mutex_unlock(&lag_mutex);

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
	struct mlx5_lag *ldev;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	return ldev;
}

static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
	kfree(ldev);
}

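/* Attach a PF and its netdev to the lag device, indexed by PCI function
 * number, and point the PF's private data at the shared lag struct.
 */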
static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
				struct mlx5_core_dev *dev,
				struct net_device *netdev)
{
	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

	if (fn >= MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	ldev->pf[fn].dev    = dev;
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;

	dev->priv.lag = ldev;
	mutex_unlock(&lag_mutex);
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
				   struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

	dev->priv.lag = NULL;
	mutex_unlock(&lag_mutex);
}

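/* The two PFs of a card are assumed to differ only in the PCI function
 * number, so <bus, slot> identifies the card; e.g. 81:00.0 and 81:00.1
 * both yield 0x8100.
 */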
static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
	return (u16)((dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;
	u16 pci_id;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
		return;

	pci_id = mlx5_gen_pci_id(dev);

	mlx5_core_for_each_priv(priv) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if ((dev != tmp_dev) &&
		    (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
			ldev = tmp_dev->priv.lag;
			break;
		}
	}

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc();
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return;
		}
	}

	mlx5_lag_dev_add_pf(ldev, dev, netdev);

	if (!ldev->nb.notifier_call) {
		ldev->nb.notifier_call = mlx5_lag_netdev_event;
		if (register_netdevice_notifier(&ldev->nb)) {
			ldev->nb.notifier_call = NULL;
			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
		}
	}
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		return;

	if (mlx5_lag_is_bonded(ldev))
		mlx5_deactivate_lag(ldev);

	mlx5_lag_dev_remove_pf(ldev, dev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			break;

	if (i == MLX5_MAX_PORTS) {
		if (ldev->nb.notifier_call)
			unregister_netdevice_notifier(&ldev->nb);
		cancel_delayed_work_sync(&ldev->bond_work);
		mlx5_lag_dev_free(ldev);
	}
}

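/* True if this device is part of a bond for which the firmware LAG object
 * currently exists.
 */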
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && mlx5_lag_is_bonded(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

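/* Return the slave netdev that currently carries the bond's RoCE traffic:
 * the active slave in active-backup mode, port 0's netdev otherwise.
 * A reference is taken on the returned netdev; the caller must release it
 * with dev_put().
 */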
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);

	if (!(ldev && mlx5_lag_is_bonded(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
		       ldev->pf[0].netdev : ldev->pf[1].netdev;
	} else {
		ndev = ldev->pf[0].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	mutex_unlock(&lag_mutex);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);