blob: 17d093cd6fc88ff32da6d3c37e22f79f7db94ecc [file] [log] [blame]
Saeed Mahameed073bb182015-12-01 18:03:18 +02001/*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/etherdevice.h>
34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/mlx5_ifc.h>
36#include <linux/mlx5/vport.h>
Maor Gottlieb86d722a2015-12-10 17:12:44 +020037#include <linux/mlx5/fs.h>
Saeed Mahameed073bb182015-12-01 18:03:18 +020038#include "mlx5_core.h"
39#include "eswitch.h"
40
/* Pseudo vport number addressing the physical uplink port in FDB rules */
#define UPLINK_VPORT 0xFFFF

/* Debug-mask bit that enables esw_debug() output via mlx5_core_dbg_mask() */
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(dev, format, ...)				\
	pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_warn(dev, format, ...)				\
	pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* Pending reconciliation action for a cached vport address; set by
 * esw_update_vport_addr_list() and consumed by esw_apply_vport_addr_list().
 */
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD = 1,
	MLX5_ACTION_DEL = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node; /* hash linkage, keyed by MAC address */
	u32 table_index; /* slot in the HW L2 (MPFS) table */
	u32 vport; /* vport that owns this UC MAC */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
	u32 refcnt; /* number of vports subscribed to this MC MAC */
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node node;
	u8 action; /* MLX5_ACTION_* pending for this address */
	u32 vport;
	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
};

/* Bits armed in the NIC vport context to request change events */
enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE)
91static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
Saeed Mahameed073bb182015-12-01 18:03:18 +020092 u32 events_mask)
93{
94 int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)];
95 int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
96 void *nic_vport_ctx;
97 int err;
98
99 memset(out, 0, sizeof(out));
100 memset(in, 0, sizeof(in));
101
102 MLX5_SET(modify_nic_vport_context_in, in,
103 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
104 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
105 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
106 if (vport)
107 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
108 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
109 in, nic_vport_context);
110
111 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
112
113 if (events_mask & UC_ADDR_CHANGE)
114 MLX5_SET(nic_vport_context, nic_vport_ctx,
115 event_on_uc_address_change, 1);
116 if (events_mask & MC_ADDR_CHANGE)
117 MLX5_SET(nic_vport_context, nic_vport_ctx,
118 event_on_mc_address_change, 1);
119
120 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
121 if (err)
122 goto ex;
123 err = mlx5_cmd_status_to_err_v2(out);
124 if (err)
125 goto ex;
126 return 0;
127ex:
128 return err;
129}
130
Saeed Mahameed9e7ea352015-12-01 18:03:23 +0200131/* E-Switch vport context HW commands */
132static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
133 u32 *out, int outlen)
134{
135 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)];
136
137 memset(in, 0, sizeof(in));
138
139 MLX5_SET(query_nic_vport_context_in, in, opcode,
140 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
141
142 MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
143 if (vport)
144 MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
145
146 return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
147}
148
149static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
150 u16 *vlan, u8 *qos)
151{
152 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)];
153 int err;
154 bool cvlan_strip;
155 bool cvlan_insert;
156
157 memset(out, 0, sizeof(out));
158
159 *vlan = 0;
160 *qos = 0;
161
162 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
163 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
164 return -ENOTSUPP;
165
166 err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
167 if (err)
168 goto out;
169
170 cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
171 esw_vport_context.vport_cvlan_strip);
172
173 cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
174 esw_vport_context.vport_cvlan_insert);
175
176 if (cvlan_strip || cvlan_insert) {
177 *vlan = MLX5_GET(query_esw_vport_context_out, out,
178 esw_vport_context.cvlan_id);
179 *qos = MLX5_GET(query_esw_vport_context_out, out,
180 esw_vport_context.cvlan_pcp);
181 }
182
183 esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
184 vport, *vlan, *qos);
185out:
186 return err;
187}
188
189static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
190 void *in, int inlen)
191{
192 u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
193
194 memset(out, 0, sizeof(out));
195
196 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
197 if (vport)
198 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
199
200 MLX5_SET(modify_esw_vport_context_in, in, opcode,
201 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
202
203 return mlx5_cmd_exec_check_status(dev, in, inlen,
204 out, sizeof(out));
205}
206
207static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
208 u16 vlan, u8 qos, bool set)
209{
210 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
211
212 memset(in, 0, sizeof(in));
213
214 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
215 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
216 return -ENOTSUPP;
217
218 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
219 vport, vlan, qos, set);
220
221 if (set) {
222 MLX5_SET(modify_esw_vport_context_in, in,
223 esw_vport_context.vport_cvlan_strip, 1);
224 /* insert only if no vlan in packet */
225 MLX5_SET(modify_esw_vport_context_in, in,
226 esw_vport_context.vport_cvlan_insert, 1);
227 MLX5_SET(modify_esw_vport_context_in, in,
228 esw_vport_context.cvlan_pcp, qos);
229 MLX5_SET(modify_esw_vport_context_in, in,
230 esw_vport_context.cvlan_id, vlan);
231 }
232
233 MLX5_SET(modify_esw_vport_context_in, in,
234 field_select.vport_cvlan_strip, 1);
235 MLX5_SET(modify_esw_vport_context_in, in,
236 field_select.vport_cvlan_insert, 1);
237
238 return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
239}
240
Saeed Mahameed073bb182015-12-01 18:03:18 +0200241/* HW L2 Table (MPFS) management */
242static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
243 u8 *mac, u8 vlan_valid, u16 vlan)
244{
245 u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)];
246 u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)];
247 u8 *in_mac_addr;
248
249 memset(in, 0, sizeof(in));
250 memset(out, 0, sizeof(out));
251
252 MLX5_SET(set_l2_table_entry_in, in, opcode,
253 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
254 MLX5_SET(set_l2_table_entry_in, in, table_index, index);
255 MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
256 MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);
257
258 in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
259 ether_addr_copy(&in_mac_addr[2], mac);
260
261 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
262 out, sizeof(out));
263}
264
265static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
266{
267 u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)];
268 u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)];
269
270 memset(in, 0, sizeof(in));
271 memset(out, 0, sizeof(out));
272
273 MLX5_SET(delete_l2_table_entry_in, in, opcode,
274 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
275 MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
276 return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
277 out, sizeof(out));
278}
279
280static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
281{
282 int err = 0;
283
284 *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
285 if (*ix >= l2_table->size)
286 err = -ENOSPC;
287 else
288 __set_bit(*ix, l2_table->bitmap);
289
290 return err;
291}
292
/* Return slot @ix to the L2 table allocator bitmap */
static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
{
	__clear_bit(ix, l2_table->bitmap);
}
297
298static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
299 u8 vlan_valid, u16 vlan,
300 u32 *index)
301{
302 struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
303 int err;
304
305 err = alloc_l2_table_index(l2_table, index);
306 if (err)
307 return err;
308
309 err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
310 if (err)
311 free_l2_table_index(l2_table, *index);
312
313 return err;
314}
315
/* Remove the HW L2 table entry at @index and release its bitmap slot.
 * The slot is freed even if the delete command fails (best-effort).
 */
static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
{
	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;

	del_l2_table_entry_cmd(dev, index);
	free_l2_table_index(l2_table, index);
}
323
Saeed Mahameed81848732015-12-01 18:03:20 +0200324/* E-Switch FDB */
325static struct mlx5_flow_rule *
326esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
327{
328 int match_header = MLX5_MATCH_OUTER_HEADERS;
329 struct mlx5_flow_destination dest;
330 struct mlx5_flow_rule *flow_rule = NULL;
331 u32 *match_v;
332 u32 *match_c;
333 u8 *dmac_v;
334 u8 *dmac_c;
335
336 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
337 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
338 if (!match_v || !match_c) {
339 pr_warn("FDB: Failed to alloc match parameters\n");
340 goto out;
341 }
342 dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
343 outer_headers.dmac_47_16);
344 dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
345 outer_headers.dmac_47_16);
346
347 ether_addr_copy(dmac_v, mac);
348 /* Match criteria mask */
349 memset(dmac_c, 0xff, 6);
350
351 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
352 dest.vport_num = vport;
353
354 esw_debug(esw->dev,
355 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
356 dmac_v, dmac_c, vport);
357 flow_rule =
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200358 mlx5_add_flow_rule(esw->fdb_table.fdb,
Saeed Mahameed81848732015-12-01 18:03:20 +0200359 match_header,
360 match_c,
361 match_v,
362 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
363 0, &dest);
364 if (IS_ERR_OR_NULL(flow_rule)) {
365 pr_warn(
366 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
367 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
368 flow_rule = NULL;
369 }
370out:
371 kfree(match_v);
372 kfree(match_c);
373 return flow_rule;
374}
375
376static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
377{
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200378 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
Saeed Mahameed81848732015-12-01 18:03:20 +0200379 struct mlx5_core_dev *dev = esw->dev;
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200380 struct mlx5_flow_namespace *root_ns;
Saeed Mahameed81848732015-12-01 18:03:20 +0200381 struct mlx5_flow_table *fdb;
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200382 struct mlx5_flow_group *g;
383 void *match_criteria;
384 int table_size;
385 u32 *flow_group_in;
Saeed Mahameed81848732015-12-01 18:03:20 +0200386 u8 *dmac;
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200387 int err = 0;
Saeed Mahameed81848732015-12-01 18:03:20 +0200388
389 esw_debug(dev, "Create FDB log_max_size(%d)\n",
390 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
391
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200392 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
393 if (!root_ns) {
394 esw_warn(dev, "Failed to get FDB flow namespace\n");
395 return -ENOMEM;
396 }
Saeed Mahameed81848732015-12-01 18:03:20 +0200397
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200398 flow_group_in = mlx5_vzalloc(inlen);
399 if (!flow_group_in)
400 return -ENOMEM;
401 memset(flow_group_in, 0, inlen);
Saeed Mahameed81848732015-12-01 18:03:20 +0200402
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200403 table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
Maor Gottliebd63cd282016-04-29 01:36:35 +0300404 fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200405 if (IS_ERR_OR_NULL(fdb)) {
406 err = PTR_ERR(fdb);
407 esw_warn(dev, "Failed to create FDB Table err %d\n", err);
408 goto out;
409 }
410
411 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
412 MLX5_MATCH_OUTER_HEADERS);
413 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
414 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
415 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
416 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
417 eth_broadcast_addr(dmac);
418
419 g = mlx5_create_flow_group(fdb, flow_group_in);
420 if (IS_ERR_OR_NULL(g)) {
421 err = PTR_ERR(g);
422 esw_warn(dev, "Failed to create flow group err(%d)\n", err);
423 goto out;
424 }
425
426 esw->fdb_table.addr_grp = g;
Saeed Mahameed81848732015-12-01 18:03:20 +0200427 esw->fdb_table.fdb = fdb;
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200428out:
429 kfree(flow_group_in);
430 if (err && !IS_ERR_OR_NULL(fdb))
431 mlx5_destroy_flow_table(fdb);
432 return err;
Saeed Mahameed81848732015-12-01 18:03:20 +0200433}
434
/* Tear down the SRIOV FDB: destroy the address group and the table,
 * then clear the handles so SRIOV-only paths (which test
 * esw->fdb_table.fdb) become no-ops. Safe to call when no FDB exists.
 */
static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.addr_grp = NULL;
}
446
/* E-Switch vport UC/MC lists management */
/* Handler signature shared by the UC/MC add/del functions below so
 * esw_apply_vport_addr_list() can dispatch through a function pointer.
 */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);
450
451static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
452{
453 struct hlist_head *hash = esw->l2_table.l2_hash;
454 struct esw_uc_addr *esw_uc;
455 u8 *mac = vaddr->node.addr;
456 u32 vport = vaddr->vport;
457 int err;
458
459 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
460 if (esw_uc) {
461 esw_warn(esw->dev,
462 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
463 mac, vport, esw_uc->vport);
464 return -EEXIST;
465 }
466
467 esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
468 if (!esw_uc)
469 return -ENOMEM;
470 esw_uc->vport = vport;
471
472 err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
473 if (err)
474 goto abort;
475
476 if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
477 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
478
479 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
480 vport, mac, esw_uc->table_index, vaddr->flow_rule);
481 return err;
482abort:
483 l2addr_hash_del(esw_uc);
484 return err;
485}
486
487static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
488{
489 struct hlist_head *hash = esw->l2_table.l2_hash;
490 struct esw_uc_addr *esw_uc;
491 u8 *mac = vaddr->node.addr;
492 u32 vport = vaddr->vport;
493
494 esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
495 if (!esw_uc || esw_uc->vport != vport) {
496 esw_debug(esw->dev,
497 "MAC(%pM) doesn't belong to vport (%d)\n",
498 mac, vport);
499 return -EINVAL;
500 }
501 esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
502 vport, mac, esw_uc->table_index, vaddr->flow_rule);
503
504 del_l2_table_entry(esw->dev, esw_uc->table_index);
505
506 if (vaddr->flow_rule)
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200507 mlx5_del_flow_rule(vaddr->flow_rule);
Saeed Mahameed81848732015-12-01 18:03:20 +0200508 vaddr->flow_rule = NULL;
509
510 l2addr_hash_del(esw_uc);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200511 return 0;
512}
513
Saeed Mahameed81848732015-12-01 18:03:20 +0200514static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
515{
516 struct hlist_head *hash = esw->mc_table;
517 struct esw_mc_addr *esw_mc;
518 u8 *mac = vaddr->node.addr;
519 u32 vport = vaddr->vport;
Saeed Mahameed073bb182015-12-01 18:03:18 +0200520
Saeed Mahameed81848732015-12-01 18:03:20 +0200521 if (!esw->fdb_table.fdb)
522 return 0;
523
524 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
525 if (esw_mc)
526 goto add;
527
528 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
529 if (!esw_mc)
530 return -ENOMEM;
531
532 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
533 esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
534add:
535 esw_mc->refcnt++;
536 /* Forward MC MAC to vport */
537 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
538 esw_debug(esw->dev,
539 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
540 vport, mac, vaddr->flow_rule,
541 esw_mc->refcnt, esw_mc->uplink_rule);
542 return 0;
543}
544
545static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
546{
547 struct hlist_head *hash = esw->mc_table;
548 struct esw_mc_addr *esw_mc;
549 u8 *mac = vaddr->node.addr;
550 u32 vport = vaddr->vport;
551
552 if (!esw->fdb_table.fdb)
553 return 0;
554
555 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
556 if (!esw_mc) {
557 esw_warn(esw->dev,
558 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
559 mac, vport);
560 return -EINVAL;
561 }
562 esw_debug(esw->dev,
563 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
564 vport, mac, vaddr->flow_rule, esw_mc->refcnt,
565 esw_mc->uplink_rule);
566
567 if (vaddr->flow_rule)
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200568 mlx5_del_flow_rule(vaddr->flow_rule);
Saeed Mahameed81848732015-12-01 18:03:20 +0200569 vaddr->flow_rule = NULL;
570
571 if (--esw_mc->refcnt)
572 return 0;
573
574 if (esw_mc->uplink_rule)
Maor Gottlieb86d722a2015-12-10 17:12:44 +0200575 mlx5_del_flow_rule(esw_mc->uplink_rule);
Saeed Mahameed81848732015-12-01 18:03:20 +0200576
577 l2addr_hash_del(esw_mc);
578 return 0;
579}
580
581/* Apply vport UC/MC list to HW l2 table and FDB table */
582static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
583 u32 vport_num, int list_type)
Saeed Mahameed073bb182015-12-01 18:03:18 +0200584{
585 struct mlx5_vport *vport = &esw->vports[vport_num];
Saeed Mahameed81848732015-12-01 18:03:20 +0200586 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
587 vport_addr_action vport_addr_add;
588 vport_addr_action vport_addr_del;
589 struct vport_addr *addr;
Saeed Mahameed073bb182015-12-01 18:03:18 +0200590 struct l2addr_node *node;
591 struct hlist_head *hash;
592 struct hlist_node *tmp;
593 int hi;
594
Saeed Mahameed81848732015-12-01 18:03:20 +0200595 vport_addr_add = is_uc ? esw_add_uc_addr :
596 esw_add_mc_addr;
597 vport_addr_del = is_uc ? esw_del_uc_addr :
598 esw_del_mc_addr;
599
600 hash = is_uc ? vport->uc_list : vport->mc_list;
Saeed Mahameed073bb182015-12-01 18:03:18 +0200601 for_each_l2hash_node(node, tmp, hash, hi) {
Saeed Mahameed81848732015-12-01 18:03:20 +0200602 addr = container_of(node, struct vport_addr, node);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200603 switch (addr->action) {
604 case MLX5_ACTION_ADD:
Saeed Mahameed81848732015-12-01 18:03:20 +0200605 vport_addr_add(esw, addr);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200606 addr->action = MLX5_ACTION_NONE;
607 break;
608 case MLX5_ACTION_DEL:
Saeed Mahameed81848732015-12-01 18:03:20 +0200609 vport_addr_del(esw, addr);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200610 l2addr_hash_del(addr);
611 break;
612 }
613 }
614}
615
Saeed Mahameed81848732015-12-01 18:03:20 +0200616/* Sync vport UC/MC list from vport context */
617static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
618 u32 vport_num, int list_type)
Saeed Mahameed073bb182015-12-01 18:03:18 +0200619{
620 struct mlx5_vport *vport = &esw->vports[vport_num];
Saeed Mahameed81848732015-12-01 18:03:20 +0200621 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
Saeed Mahameed073bb182015-12-01 18:03:18 +0200622 u8 (*mac_list)[ETH_ALEN];
Saeed Mahameed81848732015-12-01 18:03:20 +0200623 struct l2addr_node *node;
624 struct vport_addr *addr;
Saeed Mahameed073bb182015-12-01 18:03:18 +0200625 struct hlist_head *hash;
626 struct hlist_node *tmp;
627 int size;
628 int err;
629 int hi;
630 int i;
631
Saeed Mahameed81848732015-12-01 18:03:20 +0200632 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
633 MLX5_MAX_MC_PER_VPORT(esw->dev);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200634
635 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
636 if (!mac_list)
637 return;
638
Saeed Mahameed81848732015-12-01 18:03:20 +0200639 hash = is_uc ? vport->uc_list : vport->mc_list;
Saeed Mahameed073bb182015-12-01 18:03:18 +0200640
641 for_each_l2hash_node(node, tmp, hash, hi) {
Saeed Mahameed81848732015-12-01 18:03:20 +0200642 addr = container_of(node, struct vport_addr, node);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200643 addr->action = MLX5_ACTION_DEL;
644 }
645
Saeed Mahameed81848732015-12-01 18:03:20 +0200646 err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
Saeed Mahameed073bb182015-12-01 18:03:18 +0200647 mac_list, &size);
648 if (err)
Mohamad Haj Yahia761e2052016-05-03 17:13:56 +0300649 goto out;
Saeed Mahameed81848732015-12-01 18:03:20 +0200650 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
651 vport_num, is_uc ? "UC" : "MC", size);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200652
653 for (i = 0; i < size; i++) {
Saeed Mahameed81848732015-12-01 18:03:20 +0200654 if (is_uc && !is_valid_ether_addr(mac_list[i]))
Saeed Mahameed073bb182015-12-01 18:03:18 +0200655 continue;
656
Saeed Mahameed81848732015-12-01 18:03:20 +0200657 if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
658 continue;
659
660 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
Saeed Mahameed073bb182015-12-01 18:03:18 +0200661 if (addr) {
662 addr->action = MLX5_ACTION_NONE;
663 continue;
664 }
665
Saeed Mahameed81848732015-12-01 18:03:20 +0200666 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
Saeed Mahameed073bb182015-12-01 18:03:18 +0200667 GFP_KERNEL);
668 if (!addr) {
669 esw_warn(esw->dev,
670 "Failed to add MAC(%pM) to vport[%d] DB\n",
671 mac_list[i], vport_num);
672 continue;
673 }
Saeed Mahameed81848732015-12-01 18:03:20 +0200674 addr->vport = vport_num;
Saeed Mahameed073bb182015-12-01 18:03:18 +0200675 addr->action = MLX5_ACTION_ADD;
676 }
Mohamad Haj Yahia761e2052016-05-03 17:13:56 +0300677out:
Saeed Mahameed073bb182015-12-01 18:03:18 +0200678 kfree(mac_list);
679}
680
/* Workqueue handler run when FW signals a vport context change: re-sync
 * and apply the UC and/or MC address lists that this vport is armed
 * for, then re-arm the change event so the next change fires again.
 */
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NVPRT_LIST_TYPE_MC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NVPRT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	/* Re-arm only while the vport is still enabled (avoids racing
	 * teardown); FW events are one-shot until re-armed.
	 */
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}
712
Mohamad Haj Yahia5742df02016-05-03 17:13:57 +0300713static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
714 struct mlx5_vport *vport)
715{
716 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
717 struct mlx5_flow_group *vlan_grp = NULL;
718 struct mlx5_flow_group *drop_grp = NULL;
719 struct mlx5_core_dev *dev = esw->dev;
720 struct mlx5_flow_namespace *root_ns;
721 struct mlx5_flow_table *acl;
722 void *match_criteria;
723 u32 *flow_group_in;
724 /* The egress acl table contains 2 rules:
725 * 1)Allow traffic with vlan_tag=vst_vlan_id
726 * 2)Drop all other traffic.
727 */
728 int table_size = 2;
729 int err = 0;
730
731 if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
732 return;
733
734 esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
735 vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
736
737 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
738 if (!root_ns) {
739 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
740 return;
741 }
742
743 flow_group_in = mlx5_vzalloc(inlen);
744 if (!flow_group_in)
745 return;
746
747 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
748 if (IS_ERR_OR_NULL(acl)) {
749 err = PTR_ERR(acl);
750 esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
751 vport->vport, err);
752 goto out;
753 }
754
755 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
756 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
757 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
758 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
759 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
760 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
761
762 vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
763 if (IS_ERR_OR_NULL(vlan_grp)) {
764 err = PTR_ERR(vlan_grp);
765 esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
766 vport->vport, err);
767 goto out;
768 }
769
770 memset(flow_group_in, 0, inlen);
771 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
772 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
773 drop_grp = mlx5_create_flow_group(acl, flow_group_in);
774 if (IS_ERR_OR_NULL(drop_grp)) {
775 err = PTR_ERR(drop_grp);
776 esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
777 vport->vport, err);
778 goto out;
779 }
780
781 vport->egress.acl = acl;
782 vport->egress.drop_grp = drop_grp;
783 vport->egress.allowed_vlans_grp = vlan_grp;
784out:
785 kfree(flow_group_in);
786 if (err && !IS_ERR_OR_NULL(vlan_grp))
787 mlx5_destroy_flow_group(vlan_grp);
788 if (err && !IS_ERR_OR_NULL(acl))
789 mlx5_destroy_flow_table(acl);
790}
791
Mohamad Haj Yahiadfcb1ed2016-05-03 17:13:58 +0300792static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
793 struct mlx5_vport *vport)
794{
795 if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
796 mlx5_del_flow_rule(vport->egress.allowed_vlan);
797
798 if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
799 mlx5_del_flow_rule(vport->egress.drop_rule);
800
801 vport->egress.allowed_vlan = NULL;
802 vport->egress.drop_rule = NULL;
803}
804
Mohamad Haj Yahia5742df02016-05-03 17:13:57 +0300805static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
806 struct mlx5_vport *vport)
807{
808 if (IS_ERR_OR_NULL(vport->egress.acl))
809 return;
810
811 esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
812
Mohamad Haj Yahiadfcb1ed2016-05-03 17:13:58 +0300813 esw_vport_cleanup_egress_rules(esw, vport);
Mohamad Haj Yahia5742df02016-05-03 17:13:57 +0300814 mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
815 mlx5_destroy_flow_group(vport->egress.drop_grp);
816 mlx5_destroy_flow_table(vport->egress.acl);
817 vport->egress.allowed_vlans_grp = NULL;
818 vport->egress.drop_grp = NULL;
819 vport->egress.acl = NULL;
820}
821
822static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
823 struct mlx5_vport *vport)
824{
825 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
826 struct mlx5_core_dev *dev = esw->dev;
827 struct mlx5_flow_namespace *root_ns;
828 struct mlx5_flow_table *acl;
829 struct mlx5_flow_group *g;
830 void *match_criteria;
831 u32 *flow_group_in;
832 /* The ingress acl table contains 4 groups
833 * (2 active rules at the same time -
834 * 1 allow rule from one of the first 3 groups.
835 * 1 drop rule from the last group):
836 * 1)Allow untagged traffic with smac=original mac.
837 * 2)Allow untagged traffic.
838 * 3)Allow traffic with smac=original mac.
839 * 4)Drop all other traffic.
840 */
841 int table_size = 4;
842 int err = 0;
843
844 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
845 return;
846
847 esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
848 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
849
850 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
851 if (!root_ns) {
852 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
853 return;
854 }
855
856 flow_group_in = mlx5_vzalloc(inlen);
857 if (!flow_group_in)
858 return;
859
860 acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
861 if (IS_ERR_OR_NULL(acl)) {
862 err = PTR_ERR(acl);
863 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
864 vport->vport, err);
865 goto out;
866 }
867 vport->ingress.acl = acl;
868
869 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
870
871 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
872 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
873 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
874 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
875 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
876 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
877
878 g = mlx5_create_flow_group(acl, flow_group_in);
879 if (IS_ERR_OR_NULL(g)) {
880 err = PTR_ERR(g);
881 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
882 vport->vport, err);
883 goto out;
884 }
885 vport->ingress.allow_untagged_spoofchk_grp = g;
886
887 memset(flow_group_in, 0, inlen);
888 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
889 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
890 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
891 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
892
893 g = mlx5_create_flow_group(acl, flow_group_in);
894 if (IS_ERR_OR_NULL(g)) {
895 err = PTR_ERR(g);
896 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
897 vport->vport, err);
898 goto out;
899 }
900 vport->ingress.allow_untagged_only_grp = g;
901
902 memset(flow_group_in, 0, inlen);
903 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
904 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
905 MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
906 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
907 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
908
909 g = mlx5_create_flow_group(acl, flow_group_in);
910 if (IS_ERR_OR_NULL(g)) {
911 err = PTR_ERR(g);
912 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
913 vport->vport, err);
914 goto out;
915 }
916 vport->ingress.allow_spoofchk_only_grp = g;
917
918 memset(flow_group_in, 0, inlen);
919 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
920 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
921
922 g = mlx5_create_flow_group(acl, flow_group_in);
923 if (IS_ERR_OR_NULL(g)) {
924 err = PTR_ERR(g);
925 esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
926 vport->vport, err);
927 goto out;
928 }
929 vport->ingress.drop_grp = g;
930
931out:
932 if (err) {
933 if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
934 mlx5_destroy_flow_group(
935 vport->ingress.allow_spoofchk_only_grp);
936 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
937 mlx5_destroy_flow_group(
938 vport->ingress.allow_untagged_only_grp);
939 if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
940 mlx5_destroy_flow_group(
941 vport->ingress.allow_untagged_spoofchk_grp);
942 if (!IS_ERR_OR_NULL(vport->ingress.acl))
943 mlx5_destroy_flow_table(vport->ingress.acl);
944 }
945
946 kfree(flow_group_in);
947}
948
Mohamad Haj Yahiadfcb1ed2016-05-03 17:13:58 +0300949static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
950 struct mlx5_vport *vport)
951{
952 if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
953 mlx5_del_flow_rule(vport->ingress.drop_rule);
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +0300954
955 if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
956 mlx5_del_flow_rule(vport->ingress.allow_rule);
957
Mohamad Haj Yahiadfcb1ed2016-05-03 17:13:58 +0300958 vport->ingress.drop_rule = NULL;
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +0300959 vport->ingress.allow_rule = NULL;
Mohamad Haj Yahiadfcb1ed2016-05-03 17:13:58 +0300960}
961
Mohamad Haj Yahia5742df02016-05-03 17:13:57 +0300962static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
963 struct mlx5_vport *vport)
964{
965 if (IS_ERR_OR_NULL(vport->ingress.acl))
966 return;
967
968 esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
969
Mohamad Haj Yahiadfcb1ed2016-05-03 17:13:58 +0300970 esw_vport_cleanup_ingress_rules(esw, vport);
Mohamad Haj Yahia5742df02016-05-03 17:13:57 +0300971 mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
972 mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
973 mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
974 mlx5_destroy_flow_group(vport->ingress.drop_grp);
975 mlx5_destroy_flow_table(vport->ingress.acl);
976 vport->ingress.acl = NULL;
977 vport->ingress.drop_grp = NULL;
978 vport->ingress.allow_spoofchk_only_grp = NULL;
979 vport->ingress.allow_untagged_only_grp = NULL;
980 vport->ingress.allow_untagged_spoofchk_grp = NULL;
981}
982
/* esw_vport_ingress_config - (Re)program the vport's ingress ACL rules.
 *
 * Installs at most two rules in the previously created ingress ACL:
 *  - an "allow" rule matching untagged traffic (when VST vlan/qos is
 *    set) and/or the vport's permanent MAC (when spoofchk is set)
 *  - a catch-all "drop" rule for everything else
 * When neither VST nor spoofchk is configured, existing rules are
 * removed and nothing new is installed (all traffic passes).
 *
 * Callers in this file invoke it with esw->state_lock held (see
 * esw_enable_vport() and the mlx5_eswitch_set_vport_* helpers).
 *
 * Return: 0 on success or a negative errno; on failure any partially
 * installed rules are cleaned up.
 */
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	u8 smac[ETH_ALEN];
	u32 *match_v;
	u32 *match_c;
	int err = 0;
	u8 *smac_v;

	if (IS_ERR_OR_NULL(vport->ingress.acl)) {
		esw_warn(esw->dev,
			 "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	if (vport->spoofchk) {
		/* Spoof checking matches against the vport's NIC MAC */
		err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
		if (err) {
			esw_warn(esw->dev,
				 "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
				 vport->vport, err);
			return err;
		}

		if (!is_valid_ether_addr(smac)) {
			mlx5_core_warn(esw->dev,
				       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
				       vport->vport);
			return -EPERM;
		}
	}

	/* Start from a clean slate - previous rules no longer apply */
	esw_vport_cleanup_ingress_rules(esw, vport);

	/* Nothing to enforce: leave the ACL empty */
	if (!vport->vlan && !vport->qos && !vport->spoofchk)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* VST: mask vlan_tag with value 0 -> only untagged packets match */
	if (vport->vlan || vport->qos)
		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);

	/* Spoofchk: source MAC must equal the queried permanent MAC */
	if (vport->spoofchk) {
		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
		smac_v = MLX5_ADDR_OF(fte_match_param,
				      match_v,
				      outer_headers.smac_47_16);
		ether_addr_copy(smac_v, smac);
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rule(vport->ingress.acl,
				   MLX5_MATCH_OUTER_HEADERS,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
				   0, NULL);
	if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
			vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

	/* Catch-all drop rule for traffic the allow rule didn't match */
	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	vport->ingress.drop_rule =
		mlx5_add_flow_rule(vport->ingress.acl,
				   0,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, NULL);
	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
			vport->vport, err);
		vport->ingress.drop_rule = NULL;
		goto out;
	}

out:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);

	kfree(match_v);
	kfree(match_c);
	return err;
}
1086
1087static int esw_vport_egress_config(struct mlx5_eswitch *esw,
1088 struct mlx5_vport *vport)
1089{
1090 u32 *match_v;
1091 u32 *match_c;
1092 int err = 0;
1093
1094 if (IS_ERR_OR_NULL(vport->egress.acl)) {
1095 esw_warn(esw->dev, "vport[%d] configure rgress rules failed, egress acl is not initialized!\n",
1096 vport->vport);
1097 return -EPERM;
1098 }
1099
1100 esw_vport_cleanup_egress_rules(esw, vport);
1101
1102 if (!vport->vlan && !vport->qos)
1103 return 0;
1104
1105 esw_debug(esw->dev,
1106 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
1107 vport->vport, vport->vlan, vport->qos);
1108
1109 match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1110 match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
1111 if (!match_v || !match_c) {
1112 err = -ENOMEM;
1113 esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
1114 vport->vport, err);
1115 goto out;
1116 }
1117
1118 /* Allowed vlan rule */
1119 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
1120 MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
1121 MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
1122 MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
1123
1124 vport->egress.allowed_vlan =
1125 mlx5_add_flow_rule(vport->egress.acl,
1126 MLX5_MATCH_OUTER_HEADERS,
1127 match_c,
1128 match_v,
1129 MLX5_FLOW_CONTEXT_ACTION_ALLOW,
1130 0, NULL);
1131 if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
1132 err = PTR_ERR(vport->egress.allowed_vlan);
1133 pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
1134 vport->vport, err);
1135 vport->egress.allowed_vlan = NULL;
1136 goto out;
1137 }
1138
1139 /* Drop others rule (star rule) */
1140 memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1141 memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
1142 vport->egress.drop_rule =
1143 mlx5_add_flow_rule(vport->egress.acl,
1144 0,
1145 match_c,
1146 match_v,
1147 MLX5_FLOW_CONTEXT_ACTION_DROP,
1148 0, NULL);
1149 if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
1150 err = PTR_ERR(vport->egress.drop_rule);
1151 pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
1152 vport->vport, err);
1153 vport->egress.drop_rule = NULL;
1154 }
1155out:
1156 kfree(match_v);
1157 kfree(match_c);
1158 return err;
1159}
1160
/* esw_enable_vport - Bring a vport under E-Switch management.
 *
 * For VF vports, creates and programs the ingress/egress ACLs first;
 * then moves the vport admin state to AUTO, syncs the current vport
 * context and arms @enable_events on the device.  The whole sequence
 * is serialized by esw->state_lock.
 */
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
		esw_vport_enable_ingress_acl(esw, vport);
		esw_vport_enable_egress_acl(esw, vport);
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_AUTO);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	/* Run the change handler once directly to pick up existing state */
	esw_vport_change_handler(&vport->vport_change_handler);

	vport->enabled = true;

	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&esw->state_lock);
}
1195
/* esw_cleanup_vport - Flush all L2 addresses tracked for a vport.
 *
 * Marks every entry on the vport's UC and MC address lists for
 * deletion and applies both lists, removing the associated steering
 * state.  Called from esw_disable_vport() with esw->state_lock held.
 */
static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_node *tmp;
	int hi;

	/* Unicast addresses */
	for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC);

	/* Multicast addresses */
	for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC);
}
1216
/* esw_disable_vport - Remove a vport from E-Switch management.
 *
 * The tear-down order matters: mark the vport disabled first so new
 * events are discarded, drain the async IRQ and the workqueue, disarm
 * device events, and only then clean up addresses and (for VFs) the
 * ACL tables under esw->state_lock.
 */
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (!vport->enabled)
		return;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;
	vport->enabled_events = 0;

	/* Ensure the async EQ handler has observed enabled == false
	 * before we start tearing state down
	 */
	synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	mutex_lock(&esw->state_lock);
	/* We don't assume VFs will cleanup after themselves */
	esw_cleanup_vport(esw, vport_num);
	if (vport_num) {
		/* Only VFs had ACLs created in esw_enable_vport() */
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&esw->state_lock);
}
1249
1250/* Public E-Switch API */
Saeed Mahameed81848732015-12-01 18:03:20 +02001251int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
1252{
1253 int err;
1254 int i;
1255
1256 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1257 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1258 return 0;
1259
1260 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1261 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1262 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1263 return -ENOTSUPP;
1264 }
1265
Mohamad Haj Yahia5742df02016-05-03 17:13:57 +03001266 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
1267 esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
1268
1269 if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
1270 esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
1271
Saeed Mahameed81848732015-12-01 18:03:20 +02001272 esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
1273
1274 esw_disable_vport(esw, 0);
1275
1276 err = esw_create_fdb_table(esw, nvfs + 1);
1277 if (err)
1278 goto abort;
1279
1280 for (i = 0; i <= nvfs; i++)
1281 esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);
1282
1283 esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
1284 esw->enabled_vports);
1285 return 0;
1286
1287abort:
1288 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1289 return err;
1290}
1291
1292void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1293{
1294 int i;
1295
1296 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1297 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1298 return;
1299
1300 esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
1301 esw->enabled_vports);
1302
1303 for (i = 0; i < esw->total_vports; i++)
1304 esw_disable_vport(esw, i);
1305
1306 esw_destroy_fdb_table(esw);
1307
1308 /* VPORT 0 (PF) must be enabled back with non-sriov configuration */
1309 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1310}
1311
/* mlx5_eswitch_init - Allocate and initialize the E-Switch for a device.
 *
 * Returns 0 without doing anything on devices that are not an eswitch
 * manager or not Ethernet.  Otherwise allocates the l2-table bitmap,
 * the vport array and the change-event workqueue, then enables vport 0
 * (the PF) with UC address change events only; VF vports are enabled
 * later by mlx5_eswitch_enable_sriov().
 *
 * Return: 0 on success or a negative errno; on failure everything
 * allocated so far is freed via the abort path.
 */
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	int total_vports = MLX5_TOTAL_VPORTS(dev);
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	esw_info(dev,
		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
		 total_vports, l2_table_size,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	/* One bit per l2 table entry, used for HW l2 table management */
	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
				       sizeof(uintptr_t), GFP_KERNEL);
	if (!esw->l2_table.bitmap) {
		err = -ENOMEM;
		goto abort;
	}
	esw->l2_table.size = l2_table_size;

	/* Single-threaded: vport change events are serialized */
	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	mutex_init(&esw->state_lock);

	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;

	dev->priv.eswitch = esw;
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	/* VF Vports will be enabled when SRIOV is enabled */
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
	return err;
}
1383
1384void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1385{
1386 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1387 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1388 return;
1389
1390 esw_info(esw->dev, "cleanup\n");
1391 esw_disable_vport(esw, 0);
1392
1393 esw->dev->priv.eswitch = NULL;
1394 destroy_workqueue(esw->work_queue);
1395 kfree(esw->l2_table.bitmap);
1396 kfree(esw->vports);
1397 kfree(esw);
1398}
1399
1400void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
1401{
1402 struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
1403 u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
1404 struct mlx5_vport *vport;
1405
1406 if (!esw) {
1407 pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
1408 vport_num);
1409 return;
1410 }
1411
1412 vport = &esw->vports[vport_num];
Saeed Mahameed073bb182015-12-01 18:03:18 +02001413 if (vport->enabled)
1414 queue_work(esw->work_queue, &vport->vport_change_handler);
Saeed Mahameed073bb182015-12-01 18:03:18 +02001415}
Saeed Mahameed77256572015-12-01 18:03:21 +02001416
1417/* Vport Administration */
1418#define ESW_ALLOWED(esw) \
1419 (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
1420#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
1421
/* mlx5_eswitch_set_vport_mac - Set a vport's NIC MAC address.
 *
 * Rejects an invalid MAC while spoofchk is on (the spoofchk ingress
 * rule matches on the permanent MAC, see esw_vport_ingress_config()).
 * After programming the new MAC, the ingress ACL is reconfigured under
 * esw->state_lock so the spoofchk rule tracks the new address.
 *
 * NOTE(review): evport->spoofchk is read below before taking
 * esw->state_lock; a concurrent mlx5_eswitch_set_vport_spoofchk()
 * could race with that check - confirm whether the read should move
 * under the lock.
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	int err = 0;
	struct mlx5_vport *evport;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	evport = &esw->vports[vport];

	if (evport->spoofchk && !is_valid_ether_addr(mac)) {
		mlx5_core_warn(esw->dev,
			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
			       vport);
		return -EPERM;
	}

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	/* Refresh the spoofchk ingress rule with the new MAC */
	mutex_lock(&esw->state_lock);
	if (evport->enabled)
		err = esw_vport_ingress_config(esw, evport);
	mutex_unlock(&esw->state_lock);

	return err;
}
1457
/* mlx5_eswitch_set_vport_state - Set a vport's admin link state.
 *
 * @link_state: forwarded verbatim to the device; the values used
 * elsewhere in this file are the MLX5_ESW_VPORT_ADMIN_STATE_* ones.
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 int vport, int link_state)
{
	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	return mlx5_modify_vport_admin_state(esw->dev,
					     MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					     vport, link_state);
}
1470
1471int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1472 int vport, struct ifla_vf_info *ivi)
1473{
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +03001474 struct mlx5_vport *evport;
Saeed Mahameed9e7ea352015-12-01 18:03:23 +02001475 u16 vlan;
1476 u8 qos;
1477
Saeed Mahameed77256572015-12-01 18:03:21 +02001478 if (!ESW_ALLOWED(esw))
1479 return -EPERM;
1480 if (!LEGAL_VPORT(esw, vport))
1481 return -EINVAL;
1482
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +03001483 evport = &esw->vports[vport];
1484
Saeed Mahameed77256572015-12-01 18:03:21 +02001485 memset(ivi, 0, sizeof(*ivi));
1486 ivi->vf = vport - 1;
1487
1488 mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
1489 ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
1490 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
1491 vport);
Saeed Mahameed9e7ea352015-12-01 18:03:23 +02001492 query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
1493 ivi->vlan = vlan;
1494 ivi->qos = qos;
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +03001495 ivi->spoofchk = evport->spoofchk;
Saeed Mahameed77256572015-12-01 18:03:21 +02001496
1497 return 0;
1498}
Saeed Mahameed9e7ea352015-12-01 18:03:23 +02001499
/* mlx5_eswitch_set_vport_vlan - Configure VST vlan/qos for a vport.
 *
 * vlan == 0 && qos == 0 clears the VST configuration (set = 0); any
 * other combination programs it.  After updating the device cvlan
 * setting, the cached vlan/qos are stored and - for enabled vports -
 * the ingress and egress ACL rules are reprogrammed, all under
 * esw->state_lock.
 *
 * NOTE(review): modify_esw_vport_cvlan() runs before state_lock is
 * taken, so the device setting and the cached evport->vlan/qos are not
 * updated atomically with respect to concurrent callers - confirm
 * whether the device call should move under the lock.
 *
 * Return: 0 on success or a negative errno.
 */
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	struct mlx5_vport *evport;
	int err = 0;
	int set = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	/* 12-bit vlan id, 3-bit priority */
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan || qos)
		set = 1;

	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
	if (err)
		return err;

	mutex_lock(&esw->state_lock);
	evport->vlan = vlan;
	evport->qos = qos;
	if (evport->enabled) {
		err = esw_vport_ingress_config(esw, evport);
		if (err)
			goto out;
		err = esw_vport_egress_config(esw, evport);
	}

out:
	mutex_unlock(&esw->state_lock);
	return err;
}
Saeed Mahameed3b751a2a2015-12-01 18:03:24 +02001535
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +03001536int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
1537 int vport, bool spoofchk)
1538{
1539 struct mlx5_vport *evport;
1540 bool pschk;
1541 int err = 0;
1542
1543 if (!ESW_ALLOWED(esw))
1544 return -EPERM;
1545 if (!LEGAL_VPORT(esw, vport))
1546 return -EINVAL;
1547
1548 evport = &esw->vports[vport];
1549
1550 mutex_lock(&esw->state_lock);
1551 pschk = evport->spoofchk;
1552 evport->spoofchk = spoofchk;
1553 if (evport->enabled)
1554 err = esw_vport_ingress_config(esw, evport);
1555 if (err)
1556 evport->spoofchk = pschk;
1557 mutex_unlock(&esw->state_lock);
1558
1559 return err;
1560}
1561
Saeed Mahameed3b751a2a2015-12-01 18:03:24 +02001562int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
1563 int vport,
1564 struct ifla_vf_stats *vf_stats)
1565{
1566 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1567 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
1568 int err = 0;
1569 u32 *out;
1570
1571 if (!ESW_ALLOWED(esw))
1572 return -EPERM;
1573 if (!LEGAL_VPORT(esw, vport))
1574 return -EINVAL;
1575
1576 out = mlx5_vzalloc(outlen);
1577 if (!out)
1578 return -ENOMEM;
1579
1580 memset(in, 0, sizeof(in));
1581
1582 MLX5_SET(query_vport_counter_in, in, opcode,
1583 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1584 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
1585 MLX5_SET(query_vport_counter_in, in, vport_number, vport);
1586 if (vport)
1587 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1588
1589 memset(out, 0, outlen);
1590 err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
1591 if (err)
1592 goto free_out;
1593
1594 #define MLX5_GET_CTR(p, x) \
1595 MLX5_GET64(query_vport_counter_out, p, x)
1596
1597 memset(vf_stats, 0, sizeof(*vf_stats));
1598 vf_stats->rx_packets =
1599 MLX5_GET_CTR(out, received_eth_unicast.packets) +
1600 MLX5_GET_CTR(out, received_eth_multicast.packets) +
1601 MLX5_GET_CTR(out, received_eth_broadcast.packets);
1602
1603 vf_stats->rx_bytes =
1604 MLX5_GET_CTR(out, received_eth_unicast.octets) +
1605 MLX5_GET_CTR(out, received_eth_multicast.octets) +
1606 MLX5_GET_CTR(out, received_eth_broadcast.octets);
1607
1608 vf_stats->tx_packets =
1609 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
1610 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
1611 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
1612
1613 vf_stats->tx_bytes =
1614 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
1615 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
1616 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
1617
1618 vf_stats->multicast =
1619 MLX5_GET_CTR(out, received_eth_multicast.packets);
1620
1621 vf_stats->broadcast =
1622 MLX5_GET_CTR(out, received_eth_broadcast.packets);
1623
1624free_out:
1625 kvfree(out);
1626 return err;
1627}