blob: 8dbed1d4ef2fbce6094a8e176b4681b0e4d820ef [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020039#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020042#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010043#include <linux/inetdevice.h>
Ido Schimmel9db032b2017-03-16 09:08:17 +010044#include <linux/netdevice.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020045#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020046#include <net/neighbour.h>
47#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020048#include <net/ip_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010049#include <net/fib_rules.h>
Ido Schimmel57837882017-03-16 09:08:16 +010050#include <net/l3mdev.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020051
52#include "spectrum.h"
53#include "core.h"
54#include "reg.h"
55
/* Router interface (RIF): the L3 representation of a netdev on which the
 * device performs routing. Keeps back-references to the nexthops and
 * neighbour entries that resolve through it so they can be walked when the
 * RIF is torn down.
 */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops egressing via this RIF */
	struct list_head neigh_list;	/* neighbour entries learned on this RIF */
	struct net_device *dev;		/* backing net device */
	struct mlxsw_sp_fid *f;		/* associated FID */
	unsigned char addr[ETH_ALEN];	/* RIF MAC address */
	int mtu;
	u16 rif_index;			/* index into mlxsw_sp->rifs[] */
	u16 vr_id;			/* virtual router this RIF belongs to */
};
66
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

/* Iterate over every prefix length whose bit is set in a prefix-usage
 * bitmap (one bit per possible prefix length).
 */
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
73
74static bool
Jiri Pirko6b75c482016-07-04 08:23:09 +020075mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
76 struct mlxsw_sp_prefix_usage *prefix_usage2)
77{
78 unsigned char prefix;
79
80 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
81 if (!test_bit(prefix, prefix_usage2->b))
82 return false;
83 }
84 return true;
85}
86
87static bool
Jiri Pirko53342022016-07-04 08:23:08 +020088mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
89 struct mlxsw_sp_prefix_usage *prefix_usage2)
90{
91 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
92}
93
Jiri Pirko6b75c482016-07-04 08:23:09 +020094static bool
95mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
96{
97 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
98
99 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
100}
101
/* Copy one prefix-usage bitmap over another. */
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

/* Mark a prefix length as in use. */
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

/* Mark a prefix length as no longer in use. */
static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
122
/* Hash-table key for a FIB node: the route prefix. The address buffer is
 * sized for IPv6 so the same key layout can serve both protocols.
 */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

/* How the device should treat packets matching a FIB entry. */
enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,	/* forward via nexthop group */
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,	/* locally terminated */
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,	/* punt to CPU */
};
133
struct mlxsw_sp_nexthop_group;

/* One route prefix in a FIB. All entries sharing the prefix are linked on
 * entry_list.
 */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* mlxsw_sp_fib_entry::list */
	struct list_head list;		/* member of mlxsw_sp_fib::node_list */
	struct rhash_head ht_node;	/* node in mlxsw_sp_fib::ht */
	struct mlxsw_sp_fib *fib;	/* owning FIB */
	struct mlxsw_sp_fib_key key;
};

/* Kernel route attributes carried alongside a FIB entry. */
struct mlxsw_sp_fib_entry_params {
	u32 tb_id;	/* kernel routing table ID */
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;		/* member of the node's entry_list */
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;			/* entry currently programmed to HW */
};

/* A FIB instance: one per virtual router per L3 protocol. */
struct mlxsw_sp_fib {
	struct rhashtable ht;		/* nodes keyed by mlxsw_sp_fib_key */
	struct list_head node_list;	/* all nodes, in insertion order */
	struct mlxsw_sp_vr *vr;		/* owning virtual router */
	struct mlxsw_sp_lpm_tree *lpm_tree;	/* LPM tree bound to this FIB */
	/* Per-prefix-length reference counts backing prefix_usage. */
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};
170
Ido Schimmel9aecce12017-02-09 10:28:42 +0100171static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200172
Ido Schimmel76610eb2017-03-10 08:53:41 +0100173static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
174 enum mlxsw_sp_l3proto proto)
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200175{
176 struct mlxsw_sp_fib *fib;
177 int err;
178
179 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
180 if (!fib)
181 return ERR_PTR(-ENOMEM);
182 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
183 if (err)
184 goto err_rhashtable_init;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100185 INIT_LIST_HEAD(&fib->node_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100186 fib->proto = proto;
187 fib->vr = vr;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200188 return fib;
189
190err_rhashtable_init:
191 kfree(fib);
192 return ERR_PTR(err);
193}
194
/* Free a FIB. The caller must have removed all nodes and released the
 * LPM tree binding first; the WARNs catch violations of that contract.
 */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
202
Jiri Pirko53342022016-07-04 08:23:08 +0200203static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100204mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200205{
206 static struct mlxsw_sp_lpm_tree *lpm_tree;
207 int i;
208
Ido Schimmel8494ab02017-03-24 08:02:47 +0100209 for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
210 lpm_tree = &mlxsw_sp->router.lpm.trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100211 if (lpm_tree->ref_count == 0)
212 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200213 }
214 return NULL;
215}
216
/* Program the device to allocate LPM tree @lpm_tree->id (RALTA register,
 * alloc=true).
 */
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

/* Program the device to release LPM tree @lpm_tree->id (RALTA register,
 * alloc=false).
 */
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
238
/* Program the LPM tree structure (RALST register) as a left-linear chain
 * of the used prefix lengths: the longest used prefix becomes the root
 * bin, and each shorter used prefix (except 0) is chained as the left
 * child of the previously written bin.
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	/* The loop visits set bits in ascending order, so root_bin ends up
	 * holding the largest used prefix length.
	 */
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
262
263static struct mlxsw_sp_lpm_tree *
264mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
265 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100266 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200267{
268 struct mlxsw_sp_lpm_tree *lpm_tree;
269 int err;
270
Ido Schimmel382dbb42017-03-10 08:53:40 +0100271 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
Jiri Pirko53342022016-07-04 08:23:08 +0200272 if (!lpm_tree)
273 return ERR_PTR(-EBUSY);
274 lpm_tree->proto = proto;
275 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
276 if (err)
277 return ERR_PTR(err);
278
279 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
280 lpm_tree);
281 if (err)
282 goto err_left_struct_set;
Jiri Pirko2083d362016-10-25 11:25:56 +0200283 memcpy(&lpm_tree->prefix_usage, prefix_usage,
284 sizeof(lpm_tree->prefix_usage));
Jiri Pirko53342022016-07-04 08:23:08 +0200285 return lpm_tree;
286
287err_left_struct_set:
288 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
289 return ERR_PTR(err);
290}
291
/* Destroy an LPM tree: currently only requires releasing it in the
 * device.
 */
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
297
298static struct mlxsw_sp_lpm_tree *
299mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
300 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100301 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200302{
303 struct mlxsw_sp_lpm_tree *lpm_tree;
304 int i;
305
Ido Schimmel8494ab02017-03-24 08:02:47 +0100306 for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
307 lpm_tree = &mlxsw_sp->router.lpm.trees[i];
Jiri Pirko8b99bec2016-10-25 11:25:57 +0200308 if (lpm_tree->ref_count != 0 &&
309 lpm_tree->proto == proto &&
Jiri Pirko53342022016-07-04 08:23:08 +0200310 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
311 prefix_usage))
312 goto inc_ref_count;
313 }
314 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100315 proto);
Jiri Pirko53342022016-07-04 08:23:08 +0200316 if (IS_ERR(lpm_tree))
317 return lpm_tree;
318
319inc_ref_count:
320 lpm_tree->ref_count++;
321 return lpm_tree;
322}
323
/* Drop a reference on an LPM tree; the last reference destroys it. */
static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}
331
#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */

/* Allocate the host-side LPM tree array, sized from the device's
 * MAX_LPM_TREES resource minus the reserved trees, and assign each slot
 * its hardware tree ID. Returns 0 or a negative errno.
 */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router.lpm.trees)
		return -ENOMEM;

	/* Hardware tree IDs start after the reserved trees. */
	for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router.lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}
358
/* Free the host-side LPM tree array allocated by mlxsw_sp_lpm_init(). */
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router.lpm.trees);
}
363
Ido Schimmel76610eb2017-03-10 08:53:41 +0100364static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
365{
366 return !!vr->fib4;
367}
368
Jiri Pirko6b75c482016-07-04 08:23:09 +0200369static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
370{
371 struct mlxsw_sp_vr *vr;
372 int i;
373
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200374 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200375 vr = &mlxsw_sp->router.vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100376 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200377 return vr;
378 }
379 return NULL;
380}
381
/* Bind the FIB's virtual router to the FIB's LPM tree (RALTB register). */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

/* Detach the FIB's virtual router from its LPM tree by re-binding it to
 * the reserved default tree.
 */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
403
404static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
405{
406 /* For our purpose, squash main and local table into one */
407 if (tb_id == RT_TABLE_LOCAL)
408 tb_id = RT_TABLE_MAIN;
409 return tb_id;
410}
411
412static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100413 u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200414{
415 struct mlxsw_sp_vr *vr;
416 int i;
417
418 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200419
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200420 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200421 vr = &mlxsw_sp->router.vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100422 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200423 return vr;
424 }
425 return NULL;
426}
427
/* Return the VR's FIB for the given L3 protocol. Only IPv4 is supported
 * at this point; asking for IPv6 is a driver bug.
 */
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}
439
440static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
441 u32 tb_id)
442{
Jiri Pirko6b75c482016-07-04 08:23:09 +0200443 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200444
445 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
446 if (!vr)
447 return ERR_PTR(-EBUSY);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100448 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
449 if (IS_ERR(vr->fib4))
450 return ERR_CAST(vr->fib4);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200451 vr->tb_id = tb_id;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200452 return vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200453}
454
/* Detach and free the VR's FIB; clearing fib4 marks the VR as unused. */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
460
/* Make sure the FIB's LPM tree can serve @req_prefix_usage, replacing the
 * tree if needed. The new tree is bound before the old one is released so
 * lookups never run without a valid binding. Returns 0 or a negative
 * errno.
 */
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	/* Exact match - current tree already fits. */
	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	/* Old tree is only released once the new binding took effect. */
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	/* Roll back to the previous tree on bind failure. */
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
500
Ido Schimmel76610eb2017-03-10 08:53:41 +0100501static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200502{
503 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200504
505 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100506 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
507 if (!vr)
508 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200509 return vr;
510}
511
/* Release a VR: once no RIFs reference it and its FIB holds no nodes,
 * the VR is torn down and its slot becomes reusable.
 */
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}
517
/* Allocate the virtual router array, sized from the device's MAX_VRS
 * resource, and assign each slot its hardware VR ID. Returns 0 or a
 * negative errno.
 */
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
				       GFP_KERNEL);
	if (!mlxsw_sp->router.vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		vr->id = i;
	}

	return 0;
}
540
Ido Schimmelac571de2016-11-14 11:26:32 +0100541static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
542
/* Tear down the virtual routers: drain pending FIB work, flush the
 * device's tables and free the VR array.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router.vrs);
}
556
/* Hash-table key: neighbour entries are keyed by the kernel's neighbour
 * struct pointer.
 */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* member of the RIF's neigh_list */
	struct rhash_head ht_node;	/* node in router.neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif;			/* RIF index the neighbour resolves on */
	/* NOTE(review): appears to track whether the neighbour is currently
	 * resolved/offloaded - confirm against the update path.
	 */
	bool connected;
	unsigned char ha[ETH_ALEN];	/* cached hardware (MAC) address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
579
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100580static struct mlxsw_sp_neigh_entry *
581mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
582 u16 rif)
583{
584 struct mlxsw_sp_neigh_entry *neigh_entry;
585
586 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
587 if (!neigh_entry)
588 return NULL;
589
590 neigh_entry->key.n = n;
591 neigh_entry->rif = rif;
592 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
593
594 return neigh_entry;
595}
596
/* Counterpart of mlxsw_sp_neigh_entry_alloc(). */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
601
/* Insert a neighbour entry into the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

/* Remove a neighbour entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
619
620static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100621mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200622{
623 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +0100624 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100625 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200626
Arkadi Sharshevskybf952332017-03-17 09:38:00 +0100627 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
628 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100629 return ERR_PTR(-EINVAL);
630
Arkadi Sharshevskybf952332017-03-17 09:38:00 +0100631 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200632 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100633 return ERR_PTR(-ENOMEM);
634
635 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
636 if (err)
637 goto err_neigh_entry_insert;
638
Arkadi Sharshevskybf952332017-03-17 09:38:00 +0100639 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +0100640
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200641 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100642
643err_neigh_entry_insert:
644 mlxsw_sp_neigh_entry_free(neigh_entry);
645 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200646}
647
/* Undo mlxsw_sp_neigh_entry_create(): unlink from the RIF list, remove
 * from the hash table and free the entry.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
656
657static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +0100658mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200659{
Jiri Pirko33b13412016-11-10 12:31:04 +0100660 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200661
Jiri Pirko33b13412016-11-10 12:31:04 +0100662 key.n = n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200663 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
664 &key, mlxsw_sp_neigh_ht_params);
665}
666
/* Cache the ARP table's DELAY_PROBE_TIME (converted to msecs) as the
 * periodic neighbour-activity dump interval.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
}
674
/* Process one IPv4 entry from a RAUHTD activity dump: look up the kernel
 * neighbour for the reported (RIF, DIP) pair and poke it so the kernel
 * keeps the hardware-active neighbour alive.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	/* neigh_lookup() takes the address in network byte order. */
	dipn = htonl(dip);
	dev = mlxsw_sp->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	/* Mark the neighbour as recently used so the kernel refreshes it. */
	neigh_event_send(n, NULL);
	neigh_release(n);
}
705
706static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
707 char *rauhtd_pl,
708 int rec_index)
709{
710 u8 num_entries;
711 int i;
712
713 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
714 rec_index);
715 /* Hardware starts counting at 0, so add 1. */
716 num_entries++;
717
718 /* Each record consists of several neighbour entries. */
719 for (i = 0; i < num_entries; i++) {
720 int ent_index;
721
722 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
723 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
724 ent_index);
725 }
726
727}
728
/* Dispatch one RAUHTD record by type. Only IPv4 records are expected;
 * an IPv6 record indicates a driver/firmware inconsistency.
 */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
742
/* Heuristic for whether a RAUHTD response filled the whole buffer and a
 * further query is needed: all record slots used AND the last record is
 * itself full (an IPv6 record, or an IPv4 record carrying the maximum
 * number of entries).
 */
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	/* Entry count is zero-based, hence the pre-increment. */
	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}
762
Yotam Gigib2157142016-07-05 11:27:51 +0200763static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
Yotam Gigic723c7352016-07-05 11:27:43 +0200764{
Yotam Gigic723c7352016-07-05 11:27:43 +0200765 char *rauhtd_pl;
766 u8 num_rec;
767 int i, err;
768
769 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
770 if (!rauhtd_pl)
Yotam Gigib2157142016-07-05 11:27:51 +0200771 return -ENOMEM;
Yotam Gigic723c7352016-07-05 11:27:43 +0200772
773 /* Make sure the neighbour's netdev isn't removed in the
774 * process.
775 */
776 rtnl_lock();
777 do {
778 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
779 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
780 rauhtd_pl);
781 if (err) {
782 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
783 break;
784 }
785 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
786 for (i = 0; i < num_rec; i++)
787 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
788 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100789 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +0200790 rtnl_unlock();
791
792 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +0200793 return err;
794}
795
/* Keep neighbours that back nexthops alive: poke each one so the kernel
 * treats it as active even without observed traffic.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
810
811static void
812mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
813{
814 unsigned long interval = mlxsw_sp->router.neighs_update.interval;
815
816 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
817 msecs_to_jiffies(interval));
818}
819
820static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
821{
822 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
823 router.neighs_update.dw.work);
824 int err;
825
826 err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
827 if (err)
828 dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
829
830 mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
831
Yotam Gigic723c7352016-07-05 11:27:43 +0200832 mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
833}
834
/* Delayed work: periodically probe nexthop neighbours that are not yet
 * resolved, then re-schedule itself.
 */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.nexthop_probe_dw.work);

	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* Only probe neighbours not currently resolved in HW. */
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
859
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200860static void
861mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
862 struct mlxsw_sp_neigh_entry *neigh_entry,
863 bool removing);
864
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100865static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200866{
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100867 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
868 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
869}
870
/* Program (or remove) an IPv4 neighbour entry in the device's unicast host
 * table (RAUHT register), using the entry's RIF and cached MAC address.
 */
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	/* primary_key holds the neighbour's IPv4 address in network order. */
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
884
885static void
886mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
887 struct mlxsw_sp_neigh_entry *neigh_entry,
888 bool adding)
889{
890 if (!adding && !neigh_entry->connected)
891 return;
892 neigh_entry->connected = adding;
893 if (neigh_entry->key.n->tbl == &arp_tbl)
894 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
895 mlxsw_sp_rauht_op(adding));
896 else
897 WARN_ON_ONCE(1);
898}
899
/* Deferred-work context for a NETEVENT_NEIGH_UPDATE notification; carries
 * a cloned neighbour reference that the work handler releases.
 */
struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;	/* referenced; released in the work handler */
};
905
/* Process a deferred NETEVENT_NEIGH_UPDATE: sync the neighbour's MAC and
 * connectivity state into the device and into any nexthops using it.
 * Runs in process context so the RTNL mutex can be taken.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* A disconnected neighbour we never tracked requires no action. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* Drop entries that are disconnected and no longer referenced by
	 * any nexthop.
	 */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	/* Drop the reference taken by neigh_clone() in the event handler. */
	neigh_release(n);
	kfree(neigh_work);
}
950
/* Netevent notifier callback. Runs in atomic context, so real work for
 * neighbour updates is deferred to process context via a work item.
 *
 * Returns NOTIFY_DONE normally, NOTIFY_BAD on allocation failure.
 */
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		/* Mirror the kernel's DELAY_PROBE_TIME as our polling
		 * interval (in milliseconds).
		 */
		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router.neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		/* Only IPv4 (ARP) neighbours are handled. */
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}
1014
/* Initialize neighbour tracking: the neighbour hash table and the two
 * periodic works (activity polling and unresolved-nexthop probing), both
 * kicked off immediately.
 */
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
	return 0;
}
1038
/* Tear down neighbour tracking. Both works are cancelled synchronously
 * before the hash table they use is destroyed.
 */
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
1045
/* Ask the device to delete all host-table (RAUHT) entries associated with
 * the given router interface in one operation.
 */
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_rif *rif)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
			     rif->rif_index, rif->addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
1055
/* A RIF is going away: flush its neighbours from the device and destroy
 * every neighbour entry still linked to it.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	/* _safe variant: destroy unlinks entries from this list. */
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
1066
/* Hash key for nexthop lookup: the kernel FIB nexthop it mirrors. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;	/* member of rif->nexthop_list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;	/* member of router.nexthop_ht */
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *rif;	/* egress RIF; NULL if none resolved */
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

/* Hash key for nexthop-group lookup: the kernel fib_info it mirrors. */
struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};

struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;	/* member of router.nexthop_group_ht */
	struct list_head fib_list; /* list of fib entries that use this group */
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1,	/* set when adj_index refers to allocated KVD
				 * linear space
				 */
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;	/* base index of the group's KVD linear area */
	u16 ecmp_size;	/* number of allocated adjacency entries */
	u16 count;	/* number of nexthops in the flexible array below */
	struct mlxsw_sp_nexthop nexthops[0];
/* Convenience alias: the RIF of the first (only) nexthop. */
#define nh_rif	nexthops[0].rif
};
1108
/* rhashtable keyed by the group's fib_info pointer. */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};

/* Add a nexthop group to the lookup table. Returns 0 or rhashtable error. */
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

/* Remove a nexthop group from the lookup table. */
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

/* Find the nexthop group mirroring the given fib_info, or NULL. */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}
1138
/* rhashtable keyed by the nexthop's fib_nh pointer. */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

/* Add a nexthop to the lookup table. Returns 0 or rhashtable error. */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

/* Remove a nexthop from the lookup table. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

/* Find the nexthop mirroring the given fib_nh, or NULL. */
static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
1166
/* Point all LPM entries of one FIB (virtual router) that use the old
 * adjacency range at the new one, via the RALEU register.
 */
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
1181
1182static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1183 struct mlxsw_sp_nexthop_group *nh_grp,
1184 u32 old_adj_index, u16 old_ecmp_size)
1185{
1186 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001187 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001188 int err;
1189
1190 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01001191 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001192 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001193 fib = fib_entry->fib_node->fib;
1194 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001195 old_adj_index,
1196 old_ecmp_size,
1197 nh_grp->adj_index,
1198 nh_grp->ecmp_size);
1199 if (err)
1200 return err;
1201 }
1202 return 0;
1203}
1204
/* Write one adjacency (RATR) entry for a nexthop: egress RIF plus the
 * neighbour's MAC address at the given adjacency index.
 */
static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
1216
1217static int
1218mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
Ido Schimmela59b7e02017-01-23 11:11:42 +01001219 struct mlxsw_sp_nexthop_group *nh_grp,
1220 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001221{
1222 u32 adj_index = nh_grp->adj_index; /* base */
1223 struct mlxsw_sp_nexthop *nh;
1224 int i;
1225 int err;
1226
1227 for (i = 0; i < nh_grp->count; i++) {
1228 nh = &nh_grp->nexthops[i];
1229
1230 if (!nh->should_offload) {
1231 nh->offloaded = 0;
1232 continue;
1233 }
1234
Ido Schimmela59b7e02017-01-23 11:11:42 +01001235 if (nh->update || reallocate) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001236 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1237 adj_index, nh);
1238 if (err)
1239 return err;
1240 nh->update = 0;
1241 nh->offloaded = 1;
1242 }
1243 adj_index++;
1244 }
1245 return 0;
1246}
1247
1248static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1249 struct mlxsw_sp_fib_entry *fib_entry);
1250
1251static int
1252mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1253 struct mlxsw_sp_nexthop_group *nh_grp)
1254{
1255 struct mlxsw_sp_fib_entry *fib_entry;
1256 int err;
1257
1258 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1259 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1260 if (err)
1261 return err;
1262 }
1263 return 0;
1264}
1265
/* Re-evaluate a nexthop group after membership or neighbour state changed:
 * recount offloadable nexthops, reallocate the KVD linear (adjacency) area
 * when the set changed, rewrite MACs, and repoint the using FIB entries.
 * On any failure the group falls back to trapping packets to the kernel.
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int ret;
	int i;
	int err;

	/* Gateway-less groups have no adjacency entries; just rewrite the
	 * FIB entries.
	 */
	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
	if (ret < 0) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	adj_index = ret;
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	/* Atomically repoint all virtual routers from the old adjacency
	 * range to the new one, then release the old range.
	 */
	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	/* Release the group's KVD linear area only after the FIB entries
	 * stopped referencing it.
	 */
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
1370
1371static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1372 bool removing)
1373{
1374 if (!removing && !nh->should_offload)
1375 nh->should_offload = 1;
1376 else if (removing && nh->offloaded)
1377 nh->should_offload = 0;
1378 nh->update = 1;
1379}
1380
/* Propagate a neighbour state change to every nexthop using it and
 * refresh each affected nexthop group.
 */
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1394
/* Associate a nexthop with a router interface; no-op if it already has
 * one.
 */
static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}
1404
/* Detach a nexthop from its router interface; no-op if none is set. */
static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}
1413
/* Bind a nexthop to the neighbour entry of its gateway, creating both the
 * kernel neighbour and the driver entry on demand, and seed the nexthop's
 * offload state from the neighbour's current NUD state.
 *
 * Returns 0 on success (including the no-gateway / already-bound cases)
 * or a negative errno.
 */
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct fib_nh *fib_nh = nh->key.fib_nh;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	/* Only gateway groups resolve neighbours; skip if already bound. */
	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution of the freshly created neighbour. */
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router.nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}
1468
/* Unbind a nexthop from its neighbour entry, dropping the neighbour
 * reference taken at init and destroying the entry when it becomes
 * disconnected and unreferenced.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	/* Mark the nexthop as no longer offloadable via this neighbour. */
	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001494
/* Initialize one nexthop of a group from its kernel fib_nh: register it in
 * the lookup table, associate it with a RIF and resolve its neighbour.
 * A nexthop without a device, with linkdown to be ignored, or without a
 * RIF stays registered but un-offloaded.
 */
static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop_group *nh_grp,
				 struct mlxsw_sp_nexthop *nh,
				 struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->nh_dev;
	struct in_device *in_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	if (!dev)
		return 0;

	/* Honour the kernel's ignore_routes_with_linkdown setting. */
	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	/* NOTE(review): this path does not call mlxsw_sp_nexthop_rif_fini(),
	 * so nh may remain on rif->nexthop_list — verify against callers.
	 */
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}
1534
/* Tear down one nexthop: release its neighbour, detach from its RIF and
 * remove it from the lookup table (reverse order of init).
 */
static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
1542
/* Handle FIB nexthop add/delete notifications: (un)bind the mirrored
 * nexthop's RIF and neighbour and refresh its group. Ignored when the
 * router is in aborted state.
 */
static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	if (mlxsw_sp->router.aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	/* Every notified fib_nh is expected to have been mirrored. */
	if (WARN_ON_ONCE(!nh))
		return;

	/* Without a RIF on the nexthop's device there is nothing to bind. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
1575
/* A RIF is going away: detach every nexthop that was using it and refresh
 * each affected group, so the device stops forwarding through the
 * disappearing interface. Uses the _safe iterator because rif_fini()
 * unlinks the nexthop from rif->nexthop_list while iterating.
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1587
/* Create a nexthop group mirroring the kernel's fib_info: one
 * mlxsw_sp_nexthop per fib_nh, allocated inline after the group struct.
 * On success the group is inserted into the group hash table (keyed by
 * fib_info) and programmed into the device. Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	/* Group struct is followed by a flexible array of nexthops. */
	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	/* A link-scoped first nexthop means the route has a gateway. */
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	/* Unwind only the nexthops that were successfully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
1629
/* Destroy a nexthop group: remove it from the hash table, tear down each
 * nexthop, then refresh the (now empty) group so the device releases its
 * adjacency entries before the memory is freed.
 */
static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	/* Refresh of an empty group must have freed the adjacency index. */
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
1646
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001647static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1648 struct mlxsw_sp_fib_entry *fib_entry,
1649 struct fib_info *fi)
1650{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001651 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001652 struct mlxsw_sp_nexthop_group *nh_grp;
1653
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001654 key.fi = fi;
1655 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001656 if (!nh_grp) {
1657 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1658 if (IS_ERR(nh_grp))
1659 return PTR_ERR(nh_grp);
1660 }
1661 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1662 fib_entry->nh_group = nh_grp;
1663 return 0;
1664}
1665
1666static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1667 struct mlxsw_sp_fib_entry *fib_entry)
1668{
1669 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1670
1671 list_del(&fib_entry->nexthop_group_node);
1672 if (!list_empty(&nh_grp->fib_list))
1673 return;
1674 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1675}
1676
Ido Schimmel013b20f2017-02-08 11:16:36 +01001677static bool
1678mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1679{
1680 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1681
Ido Schimmel9aecce12017-02-09 10:28:42 +01001682 if (fib_entry->params.tos)
1683 return false;
1684
Ido Schimmel013b20f2017-02-08 11:16:36 +01001685 switch (fib_entry->type) {
1686 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1687 return !!nh_group->adj_index_valid;
1688 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001689 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001690 default:
1691 return false;
1692 }
1693}
1694
/* Mark the entry as offloaded and bump the fib_info's offload counter so
 * the kernel can report RTNH_F_OFFLOAD to user space. IPv6 is not
 * supported by this driver version, hence the WARN.
 */
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
	}
}
1707
/* Counterpart of mlxsw_sp_fib_entry_offload_set(): drop the fib_info's
 * offload counter and clear the entry's offloaded flag.
 */
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_dec(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
	}

	fib_entry->offloaded = false;
}
1721
/* Synchronize the entry's offload indication after a RALUE operation.
 * A delete clears the indication (if set); a successful write sets or
 * clears it depending on whether the entry is actually offloadable now.
 * @err is the result of the RALUE write; failed writes change nothing.
 */
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		if (!fib_entry->offloaded)
			return;
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
		    !fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
			 fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
1745
/* Program an IPv4 "remote" (gateway) route into the device via the RALUE
 * register: forward through the group's adjacency entries when offloadable,
 * otherwise trap matching packets to the CPU.
 */
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1779
/* Program an IPv4 "local" (directly connected) route via the RALUE
 * register: deliver through the router interface when offloadable,
 * otherwise trap matching packets to the CPU.
 */
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif_index = 0;

	/* rif is only dereferenced when should_offload() is true, which
	 * for local entries implies nh_rif is non-NULL.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1808
/* Program an IPv4 route whose packets are always punted to the CPU
 * (ip2me action), e.g. broadcast and host-local routes.
 */
static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1824
1825static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
1826 struct mlxsw_sp_fib_entry *fib_entry,
1827 enum mlxsw_reg_ralue_op op)
1828{
1829 switch (fib_entry->type) {
1830 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001831 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001832 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1833 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
1834 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
1835 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
1836 }
1837 return -EINVAL;
1838}
1839
/* Perform a RALUE operation on a FIB entry and keep its offload
 * indication in sync with the result.
 */
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = -EINVAL;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 is unsupported; return without refreshing the
		 * offload state, which was never set for such entries.
		 */
		return err;
	}
	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
	return err;
}
1856
1857static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1858 struct mlxsw_sp_fib_entry *fib_entry)
1859{
Jiri Pirko7146da32016-09-01 10:37:41 +02001860 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1861 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001862}
1863
1864static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
1865 struct mlxsw_sp_fib_entry *fib_entry)
1866{
1867 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1868 MLXSW_REG_RALUE_OP_WRITE_DELETE);
1869}
1870
/* Map the kernel route type from the FIB notification to the driver's
 * internal entry type, which selects the RALUE action used later.
 * Returns -EINVAL for route types the device cannot handle.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		/* Host-directed traffic: punt to CPU with high priority. */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		/* Gateway routes are remote, directly connected are local. */
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
1902
/* Allocate and initialize a FIB entry for the given FIB node from an IPv4
 * FIB notification: determine its type, take a nexthop group reference
 * and record the route parameters used to order entries within the node.
 * Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_alloc;
	}

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop_group_get;

	fib_entry->params.prio = fen_info->fi->fib_priority;
	fib_entry->params.tb_id = fen_info->tb_id;
	fib_entry->params.type = fen_info->type;
	fib_entry->params.tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib_entry;

err_nexthop_group_get:
err_fib4_entry_type_set:
	kfree(fib_entry);
err_fib_entry_alloc:
	return ERR_PTR(err);
}
1940
/* Free a FIB entry, dropping its nexthop group reference first. */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}
1947
1948static struct mlxsw_sp_fib_node *
1949mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
1950 const struct fib_entry_notifier_info *fen_info);
1951
/* Find the FIB entry matching a FIB notification, or NULL.
 *
 * NOTE(review): this calls mlxsw_sp_fib4_node_get(), which may *create*
 * the VR/FIB node if they do not exist, even though this is a pure
 * lookup path - presumably relying on the caller to balance references;
 * verify against callers (upstream later reworked this to a non-creating
 * lookup).
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node))
		return NULL;

	/* An entry matches on table ID, TOS, route type and fib_info. */
	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id == fen_info->tb_id &&
		    fib_entry->params.tos == fen_info->tos &&
		    fib_entry->params.type == fen_info->type &&
		    fib_entry->nh_group->key.fi == fen_info->fi) {
			return fib_entry;
		}
	}

	return NULL;
}
1974
/* Hash table parameters for FIB nodes, keyed by {prefix, prefix_len}. */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
1981
/* Insert a FIB node into its FIB's hash table. */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
1988
/* Remove a FIB node from its FIB's hash table. */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
1995
/* Look up a FIB node by prefix. The key is memset() to zero first so
 * unused address bytes (and any padding) hash consistently.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
2007
2008static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01002009mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002010 size_t addr_len, unsigned char prefix_len)
2011{
2012 struct mlxsw_sp_fib_node *fib_node;
2013
2014 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2015 if (!fib_node)
2016 return NULL;
2017
2018 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002019 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002020 memcpy(fib_node->key.addr, addr, addr_len);
2021 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002022
2023 return fib_node;
2024}
2025
/* Unlink and free a FIB node; it must not hold any entries anymore. */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
2032
2033static bool
2034mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2035 const struct mlxsw_sp_fib_entry *fib_entry)
2036{
2037 return list_first_entry(&fib_node->entry_list,
2038 struct mlxsw_sp_fib_entry, list) == fib_entry;
2039}
2040
2041static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2042{
2043 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002044 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002045
2046 if (fib->prefix_ref_count[prefix_len]++ == 0)
2047 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2048}
2049
2050static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2051{
2052 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002053 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002054
2055 if (--fib->prefix_ref_count[prefix_len] == 0)
2056 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2057}
2058
Ido Schimmel76610eb2017-03-10 08:53:41 +01002059static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2060 struct mlxsw_sp_fib_node *fib_node,
2061 struct mlxsw_sp_fib *fib)
2062{
2063 struct mlxsw_sp_prefix_usage req_prefix_usage;
2064 struct mlxsw_sp_lpm_tree *lpm_tree;
2065 int err;
2066
2067 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2068 if (err)
2069 return err;
2070 fib_node->fib = fib;
2071
2072 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2073 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2074
2075 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2076 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2077 &req_prefix_usage);
2078 if (err)
2079 goto err_tree_check;
2080 } else {
2081 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2082 fib->proto);
2083 if (IS_ERR(lpm_tree))
2084 return PTR_ERR(lpm_tree);
2085 fib->lpm_tree = lpm_tree;
2086 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2087 if (err)
2088 goto err_tree_bind;
2089 }
2090
2091 mlxsw_sp_fib_node_prefix_inc(fib_node);
2092
2093 return 0;
2094
2095err_tree_bind:
2096 fib->lpm_tree = NULL;
2097 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2098err_tree_check:
2099 fib_node->fib = NULL;
2100 mlxsw_sp_fib_node_remove(fib, fib_node);
2101 return err;
2102}
2103
/* Reverse of mlxsw_sp_fib_node_init(): drop the prefix accounting,
 * unbind and release the LPM tree if this was the FIB's last prefix
 * (otherwise re-check the tree against the shrunken usage), and remove
 * the node from the hash table.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
2123
/* Get (find or create) the IPv4 FIB node for a notified route prefix.
 * Takes a reference on the virtual router; an existing node is returned
 * as-is, otherwise a new node is created and attached to the FIB.
 * Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2164
/* Release a FIB node obtained via mlxsw_sp_fib4_node_get(). The node (and
 * the virtual router reference) are only freed once no entries remain.
 */
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}
2176
/* Find the position for a new entry within a node's entry list, which is
 * kept sorted by table ID, then TOS, then priority (each descending).
 * Returns the first existing entry the new one should be inserted before,
 * or NULL if it belongs at a position determined by the caller.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		if (fib_entry->params.tos > params->tos)
			continue;
		/* Same table: match on higher-or-equal priority within the
		 * same TOS, or any entry with a lower TOS.
		 */
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}
2197
/* Append @new_entry after the run of entries that share @fib_entry's
 * {tb_id, tos, prio} triplet (NLM_F_APPEND semantics). @fib_entry is the
 * insertion point found by mlxsw_sp_fib4_node_entry_find() and must exist.
 */
static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	/* Skip past all entries with identical sort keys. */
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	/* list_add_tail() on a member inserts before it. */
	list_add_tail(&new_entry->list, &fib_entry->list);
	return 0;
}
2217
/* Insert @new_entry into the node's sorted entry list. @append implements
 * NLM_F_APPEND ordering; @replace requires an existing matching entry
 * (the new entry is placed right before it so the caller can remove the
 * old one afterwards).
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		/* No insertion point found: place the entry after the last
		 * entry belonging to a higher table ID, keeping the list
		 * sorted by descending tb_id.
		 */
		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}
2254
/* Unlink an entry from its node's entry list. */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
2260
/* Program @fib_entry into the device if it became the node's first (i.e.
 * best) entry; non-first entries are kept in software only.
 */
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		/* The demoted entry is overwritten, not deleted from the
		 * device; only its offload indication needs clearing.
		 */
		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
2281
/* Remove @fib_entry from the device if it was the node's first entry,
 * promoting the next entry in its place when one exists.
 */
static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	/* Last entry of the node - actually delete it from the device. */
	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
2302
/* Link an entry into its node's list and, if it became the best entry,
 * program it into the device. Unwinds the list insertion on failure.
 */
static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
					     append);
	if (err)
		return err;

	err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
	if (err)
		goto err_fib4_node_entry_add;

	return 0;

err_fib4_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib_entry);
	return err;
}
2325
/* Reverse of mlxsw_sp_fib4_node_entry_link(): remove the entry from the
 * device (promoting a successor if needed) and unlink it from the node.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}
2335
/* Complete an NLM_F_REPLACE insertion: tear down the entry that was
 * replaced (it sits right after the newly inserted one) and drop its
 * node reference. No-op unless @replace is set.
 */
static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
2353
/* Top-level handler for an IPv4 route add notification: get (or create)
 * the FIB node for the prefix, create the entry, link it into the node
 * and finish any NLM_F_REPLACE by destroying the superseded entry.
 * Returns 0 when the router is in abort mode (notifications ignored).
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}
2396
Jiri Pirko37956d72016-10-20 16:05:43 +02002397static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2398 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002399{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002400 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002401 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002402
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002403 if (mlxsw_sp->router.aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02002404 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002405
Ido Schimmel9aecce12017-02-09 10:28:42 +01002406 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2407 if (WARN_ON(!fib_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02002408 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002409 fib_node = fib_entry->fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02002410
Ido Schimmel9aecce12017-02-09 10:28:42 +01002411 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2412 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2413 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002414}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002415
/* Program the abort state: allocate the minimal LPM tree (RALTA),
 * set its single-prefix structure (RALST) and, for every virtual
 * router in use, bind it to that tree (RALTB) and install a default
 * /0 route (RALUE) whose action punts packets to the CPU (IP2ME).
 * After this, all routed traffic is handled by the kernel instead of
 * the ASIC.
 *
 * Returns 0 on success or the errno of the first failing register
 * write (no unwind - the device is already in a degraded state).
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* Default route: prefix length 0, destination 0. */
		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
2461
Ido Schimmel9aecce12017-02-09 10:28:42 +01002462static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2463 struct mlxsw_sp_fib_node *fib_node)
2464{
2465 struct mlxsw_sp_fib_entry *fib_entry, *tmp;
2466
2467 list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
2468 bool do_break = &tmp->list == &fib_node->entry_list;
2469
2470 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2471 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2472 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2473 /* Break when entry list is empty and node was freed.
2474 * Otherwise, we'll access freed memory in the next
2475 * iteration.
2476 */
2477 if (do_break)
2478 break;
2479 }
2480}
2481
2482static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2483 struct mlxsw_sp_fib_node *fib_node)
2484{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002485 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002486 case MLXSW_SP_L3_PROTO_IPV4:
2487 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2488 break;
2489 case MLXSW_SP_L3_PROTO_IPV6:
2490 WARN_ON_ONCE(1);
2491 break;
2492 }
2493}
2494
Ido Schimmel76610eb2017-03-10 08:53:41 +01002495static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2496 struct mlxsw_sp_vr *vr,
2497 enum mlxsw_sp_l3proto proto)
2498{
2499 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
2500 struct mlxsw_sp_fib_node *fib_node, *tmp;
2501
2502 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2503 bool do_break = &tmp->list == &fib->node_list;
2504
2505 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2506 if (do_break)
2507 break;
2508 }
2509}
2510
Ido Schimmelac571de2016-11-14 11:26:32 +01002511static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002512{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002513 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002514
Jiri Pirkoc1a38312016-10-21 16:07:23 +02002515 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002516 struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01002517
Ido Schimmel76610eb2017-03-10 08:53:41 +01002518 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002519 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002520 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002521 }
Ido Schimmelac571de2016-11-14 11:26:32 +01002522}
2523
/* Abandon FIB offload: flush everything already programmed, mark the
 * router as aborted (making subsequent add/del calls no-ops) and trap
 * all routed traffic to the CPU. Idempotent - returns early if the
 * abort already happened.
 */
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
2537
Ido Schimmel30572242016-12-03 16:45:01 +01002538struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01002539 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01002540 union {
2541 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002542 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01002543 struct fib_nh_notifier_info fnh_info;
2544 };
Ido Schimmel30572242016-12-03 16:45:01 +01002545 struct mlxsw_sp *mlxsw_sp;
2546 unsigned long event;
2547};
2548
/* Process a deferred FIB event under RTNL. Each case releases the
 * reference taken on its payload in mlxsw_sp_router_fib_event(), and
 * the work item itself is freed on exit.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		/* Any offload failure aborts FIB offload altogether. */
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		/* Policy routing rules (other than default / l3mdev)
		 * cannot be offloaded, so abort.
		 */
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
2593
/* Called with rcu_read_lock() */
/* FIB notifier callback: runs in atomic context, so it only copies the
 * notifier payload, takes a reference on any object that must survive
 * until process context, and schedules the real work.
 */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	/* Only the initial network namespace is supported. */
	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	fib_work->mlxsw_sp = mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
2640
Ido Schimmel4724ba562017-03-10 08:53:39 +01002641static struct mlxsw_sp_rif *
2642mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2643 const struct net_device *dev)
2644{
2645 int i;
2646
2647 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2648 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
2649 return mlxsw_sp->rifs[i];
2650
2651 return NULL;
2652}
2653
/* Disable router interface @rif in hardware: read its current RITR
 * configuration, clear the enable bit and write it back. The query
 * failing is unexpected, hence the WARN_ON_ONCE.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2667
/* Synchronize router state with the disappearance of a RIF: disable it
 * in hardware, then flush the nexthops and neighbour entries that were
 * using it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
2675
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002676static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002677 const struct in_device *in_dev,
2678 unsigned long event)
2679{
2680 switch (event) {
2681 case NETDEV_UP:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002682 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002683 return true;
2684 return false;
2685 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002686 if (rif && !in_dev->ifa_list &&
2687 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01002688 return true;
2689 /* It is possible we already removed the RIF ourselves
2690 * if it was assigned to a netdev that is now a bridge
2691 * or LAG slave.
2692 */
2693 return false;
2694 }
2695
2696 return false;
2697}
2698
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002699#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
Ido Schimmel4724ba562017-03-10 08:53:39 +01002700static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2701{
2702 int i;
2703
2704 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2705 if (!mlxsw_sp->rifs[i])
2706 return i;
2707
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002708 return MLXSW_SP_INVALID_INDEX_RIF;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002709}
2710
2711static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2712 bool *p_lagged, u16 *p_system_port)
2713{
2714 u8 local_port = mlxsw_sp_vport->local_port;
2715
2716 *p_lagged = mlxsw_sp_vport->lagged;
2717 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2718}
2719
/* Create (@create == true) or destroy a sub-port (SP_IF) router
 * interface for @mlxsw_sp_vport in hardware via the RITR register,
 * bound to virtual router @vr_id and taking MTU/MAC from @l3_dev.
 */
static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    u16 vr_id, struct net_device *l3_dev,
				    u16 rif_index, bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged; /* overwritten by attr_get below */
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
			    vr_id, l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2738
2739static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2740
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002741static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002742{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002743 return MLXSW_SP_RFID_BASE + rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002744}
2745
2746static struct mlxsw_sp_fid *
2747mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2748{
2749 struct mlxsw_sp_fid *f;
2750
2751 f = kzalloc(sizeof(*f), GFP_KERNEL);
2752 if (!f)
2753 return NULL;
2754
2755 f->leave = mlxsw_sp_vport_rif_sp_leave;
2756 f->ref_count = 0;
2757 f->dev = l3_dev;
2758 f->fid = fid;
2759
2760 return f;
2761}
2762
/* Allocate and initialize a RIF object mirroring @l3_dev's current
 * MAC address and MTU. Returns NULL on allocation failure; the caller
 * is responsible for registering it in mlxsw_sp->rifs[].
 */
static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
		   struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(sizeof(*rif), GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	ether_addr_copy(rif->addr, l3_dev->dev_addr);
	rif->mtu = l3_dev->mtu;
	rif->vr_id = vr_id;
	rif->dev = l3_dev;
	rif->rif_index = rif_index;
	rif->f = f;

	return rif;
}
2784
/* Create a sub-port RIF for @mlxsw_sp_vport on top of @l3_dev:
 * reserve a RIF index and a virtual router (from the device's l3mdev
 * table, falling back to the main table), program the RITR interface,
 * install an FDB entry for the device's MAC, and allocate the FID and
 * RIF bookkeeping objects.
 *
 * Returns the new RIF or an ERR_PTR; on failure all steps are unwound
 * in reverse order.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *rif;
	u16 fid, rif_index;
	int err;

	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return ERR_PTR(-ERANGE);

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
				       rif_index, true);
	if (err)
		goto err_vport_rif_sp_op;

	fid = mlxsw_sp_rif_sp_to_fid(rif_index);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->rif = rif;
	mlxsw_sp->rifs[rif_index] = rif;
	vr->rif_count++;

	return rif;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
err_vport_rif_sp_op:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2844
/* Tear down a sub-port RIF: inverse of mlxsw_sp_vport_rif_sp_create(),
 * releasing resources in reverse order of acquisition. Router state
 * depending on the RIF (nexthops, neighbours) is flushed first.
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
	/* Cache what the unregistration steps need before freeing rif/f. */
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;
	u16 fid = f->fid;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	vr->rif_count--;
	mlxsw_sp->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
	mlxsw_sp_vr_put(vr);
}
2871
/* Join @mlxsw_sp_vport to the RIF of @l3_dev, creating the RIF on
 * first use. The vPort is bound to the RIF's FID, whose reference
 * count tracks the number of joined vPorts.
 */
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
	rif->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);

	return 0;
}
2892
/* Detach @mlxsw_sp_vport from its router FID; the RIF is destroyed
 * when the last vPort leaves. Counterpart of
 * mlxsw_sp_vport_rif_sp_join(), also installed as the FID's ->leave()
 * callback.
 */
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
}
2903
/* Handle an inetaddr event on @l3_dev for the vPort of @port_dev with
 * VLAN @vid: join the RIF on NETDEV_UP, leave it on NETDEV_DOWN. A
 * missing vPort indicates a driver state inconsistency (WARN_ON).
 */
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}
2925
2926static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2927 unsigned long event)
2928{
2929 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2930 return 0;
2931
2932 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2933}
2934
/* Propagate an inetaddr event on @l3_dev to every mlxsw port enslaved
 * to @lag_dev, for VLAN @vid. Stops at the first failure; earlier
 * successful joins are not rolled back here.
 */
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}
2954
/* Inetaddr event on a LAG device: handled on VLAN 1 of its member
 * ports, unless the LAG is itself enslaved to a bridge (in which case
 * the bridge device handles it).
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	int err = 0;

	if (!netif_is_bridge_port(lag_dev))
		err = __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
						    1);

	return err;
}
2963
2964static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2965 struct net_device *l3_dev)
2966{
2967 u16 fid;
2968
2969 if (is_vlan_dev(l3_dev))
2970 fid = vlan_dev_vlan_id(l3_dev);
2971 else if (mlxsw_sp->master_bridge.dev == l3_dev)
2972 fid = 1;
2973 else
2974 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2975
2976 return mlxsw_sp_fid_find(mlxsw_sp, fid);
2977}
2978
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01002979static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
2980{
2981 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
2982}
2983
Ido Schimmel4724ba562017-03-10 08:53:39 +01002984static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
2985{
2986 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
2987 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2988}
2989
2990static u16 mlxsw_sp_flood_table_index_get(u16 fid)
2991{
2992 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
2993}
2994
/* Add (@set) or remove the router port from the broadcast flood table
 * of @fid via the SFTR register, so that flooded traffic does / does
 * not reach the router. Returns 0 or an errno (including -ENOMEM for
 * the temporary register buffer).
 */
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	/* SFTR is too large for the stack; allocate it. */
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
			    1, router_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}
3017
3018static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3019{
3020 if (mlxsw_sp_fid_is_vfid(fid))
3021 return MLXSW_REG_RITR_FID_IF;
3022 else
3023 return MLXSW_REG_RITR_VLAN_IF;
3024}
3025
Ido Schimmel69132292017-03-10 08:53:42 +01003026static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003027 struct net_device *l3_dev,
3028 u16 fid, u16 rif,
3029 bool create)
3030{
3031 enum mlxsw_reg_ritr_if_type rif_type;
3032 char ritr_pl[MLXSW_REG_RITR_LEN];
3033
3034 rif_type = mlxsw_sp_rif_type_get(fid);
Ido Schimmel69132292017-03-10 08:53:42 +01003035 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003036 l3_dev->dev_addr);
3037 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3038
3039 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3040}
3041
/* Create a RIF for a bridge (or VLAN-on-bridge) device backed by FID
 * @f: reserve a RIF index and virtual router, enable router flooding
 * for the FID, program the RITR interface, install the MAC FDB entry
 * and allocate the RIF bookkeeping object.
 *
 * Returns 0 or a negative errno; on failure all steps are unwound in
 * reverse order.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return -ERANGE;

	/* Fall back to the main table when not under an l3mdev (VRF). */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		goto err_port_flood_set;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
				     rif_index, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->rif = rif;
	mlxsw_sp->rifs[rif_index] = rif;
	vr->rif_count++;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
	mlxsw_sp_vr_put(vr);
	return err;
}
3098
/* Tear down a bridge RIF: inverse of mlxsw_sp_rif_bridge_create(),
 * releasing resources in reverse order of acquisition. Router state
 * depending on the RIF is flushed first. Non-static: also called from
 * other parts of the driver.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
	/* Cache what the unregistration steps need before freeing rif. */
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	vr->rif_count--;
	mlxsw_sp->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	mlxsw_sp_vr_put(vr);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
}
3126
3127static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3128 struct net_device *br_dev,
3129 unsigned long event)
3130{
3131 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3132 struct mlxsw_sp_fid *f;
3133
3134 /* FID can either be an actual FID if the L3 device is the
3135 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3136 * L3 device is a VLAN-unaware bridge and we get a vFID.
3137 */
3138 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3139 if (WARN_ON(!f))
3140 return -EINVAL;
3141
3142 switch (event) {
3143 case NETDEV_UP:
3144 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3145 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003146 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003147 break;
3148 }
3149
3150 return 0;
3151}
3152
3153static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3154 unsigned long event)
3155{
3156 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3157 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3158 u16 vid = vlan_dev_vlan_id(vlan_dev);
3159
3160 if (mlxsw_sp_port_dev_check(real_dev))
3161 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3162 vid);
3163 else if (netif_is_lag_master(real_dev))
3164 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3165 vid);
3166 else if (netif_is_bridge_master(real_dev) &&
3167 mlxsw_sp->master_bridge.dev == real_dev)
3168 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3169 event);
3170
3171 return 0;
3172}
3173
3174int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3175 unsigned long event, void *ptr)
3176{
3177 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3178 struct net_device *dev = ifa->ifa_dev->dev;
3179 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003180 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003181 int err = 0;
3182
3183 mlxsw_sp = mlxsw_sp_lower_get(dev);
3184 if (!mlxsw_sp)
3185 goto out;
3186
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003187 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3188 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003189 goto out;
3190
3191 if (mlxsw_sp_port_dev_check(dev))
3192 err = mlxsw_sp_inetaddr_port_event(dev, event);
3193 else if (netif_is_lag_master(dev))
3194 err = mlxsw_sp_inetaddr_lag_event(dev, event);
3195 else if (netif_is_bridge_master(dev))
3196 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3197 else if (is_vlan_dev(dev))
3198 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
3199
3200out:
3201 return notifier_from_errno(err);
3202}
3203
/* Update the MAC address and MTU of an existing RIF through the RITR
 * register: query the current entry, patch the MTU and MAC fields and
 * write it back.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Read back the current RITR entry so unrelated fields are kept. */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	/* NOTE(review): the op field is set to CREATE when re-writing an
	 * existing entry - presumably how RITR expresses an in-place
	 * update; confirm against the register documentation.
	 */
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
3220
/* Handle a MAC address or MTU change on a netdev that has a RIF.
 *
 * The update is a three-step sequence with rollback on failure:
 * 1. Remove the FDB entry for the old MAC.
 * 2. Edit the RIF in hardware with the new MAC and MTU.
 * 3. Install an FDB entry for the new MAC.
 * On error, completed steps are undone in reverse order so the old
 * configuration is left intact.
 *
 * Returns 0 on success (or when there is nothing to do), negative
 * errno on failure.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err;

	/* Not a device handled by this driver instance. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	/* No RIF configured for the device - nothing to update. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	/* Step 1: remove the FDB entry for the old MAC address. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
	if (err)
		return err;

	/* Step 2: apply the new MAC and MTU to the hardware RIF. */
	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	/* Step 3: install an FDB entry for the new MAC address. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Commit the new parameters to the driver's RIF state. */
	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	/* Roll back step 2: restore the previous MAC and MTU. */
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	/* Roll back step 1: re-install the FDB entry for the old MAC. */
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
	return err;
}
3261
Ido Schimmel7179eb52017-03-16 09:08:18 +01003262int mlxsw_sp_vport_vrf_join(struct mlxsw_sp_port *mlxsw_sp_vport)
3263{
3264 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3265 struct net_device *dev = mlxsw_sp_vport->dev;
3266
3267 /* In case vPort already has a RIF, then we need to drop it.
3268 * A new one will be created using the VRF's VR.
3269 */
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003270 if (f && f->rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003271 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3272
3273 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, dev);
3274}
3275
/* Detach a vPort from its VRF by dropping its Spectrum RIF. */
void mlxsw_sp_vport_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
}
3280
3281int mlxsw_sp_port_vrf_join(struct mlxsw_sp_port *mlxsw_sp_port)
3282{
3283 struct mlxsw_sp_port *mlxsw_sp_vport;
3284
3285 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3286 if (WARN_ON(!mlxsw_sp_vport))
3287 return -EINVAL;
3288
3289 return mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
3290}
3291
/* Detach a port from its VRF by detaching its vPort of VID 1. */
void mlxsw_sp_port_vrf_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *vport;

	vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!vport))
		return;

	mlxsw_sp_vport_vrf_leave(vport);
}
3302
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003303int mlxsw_sp_bridge_vrf_join(struct mlxsw_sp *mlxsw_sp,
3304 struct net_device *l3_dev)
3305{
3306 struct mlxsw_sp_fid *f;
3307
3308 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3309 if (WARN_ON(!f))
3310 return -EINVAL;
3311
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003312 if (f->rif)
3313 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003314
3315 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3316}
3317
3318void mlxsw_sp_bridge_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3319 struct net_device *l3_dev)
3320{
3321 struct mlxsw_sp_fid *f;
3322
3323 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3324 if (WARN_ON(!f))
3325 return;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003326 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003327}
3328
/* Callback passed to register_fib_notifier(); invoked by the FIB
 * notifier infrastructure before the FIB is dumped to us again.
 */
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
}
3340
/* Low-level router init: allocate the per-ASIC RIF table and enable
 * the router in hardware via the RGCR register.
 *
 * Returns 0 on success, -EIO when the MAX_RIFS resource is not exposed
 * by the device, -ENOMEM or a register-write error otherwise.
 */
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	/* Array of RIF pointers, indexed by RIF index; zeroed so every
	 * slot starts out free.
	 */
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	/* Enable the router and cap the number of RIFs it may use. */
	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}
3368
3369static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3370{
3371 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3372 int i;
3373
3374 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3375 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3376
3377 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3378 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
3379
3380 kfree(mlxsw_sp->rifs);
3381}
3382
/* Initialize the router subsystem: hardware router, nexthop hash
 * tables, LPM trees, virtual routers, neighbour handling and the FIB
 * notifier. On failure, already-initialized parts are torn down in
 * reverse order via the goto ladder.
 *
 * Returns 0 on success, negative errno on failure.
 */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	/* Registering the FIB notifier is the last step, so that no FIB
	 * events are delivered before the rest of the router is ready.
	 */
	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
err_nexthop_ht_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}
3436
/* Tear down the router subsystem in reverse order of
 * mlxsw_sp_router_init(): unregister the FIB notifier first so no new
 * events arrive while the rest is dismantled.
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
}