blob: 1839ba05f4dde45fc71d341dc0fa4827f0b202a2 [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020039#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020042#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010043#include <linux/inetdevice.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020044#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020045#include <net/neighbour.h>
46#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020047#include <net/ip_fib.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020048
49#include "spectrum.h"
50#include "core.h"
51#include "reg.h"
52
/* Router InterFace (RIF): the router-side representation of a netdev.
 * 'rif' is the index of the matching hardware router interface.
 */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops egressing via this RIF */
	struct list_head neigh_list;	/* neighbour entries resolved on this RIF */
	struct net_device *dev;		/* backing net device */
	struct mlxsw_sp_fid *f;		/* FID this RIF is associated with */
	unsigned char addr[ETH_ALEN];	/* RIF MAC address */
	int mtu;
	u16 rif;			/* hardware RIF index */
};

/* Defined further down this file; needed by the neighbour code above it. */
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
66
/* Iterate over every prefix length whose bit is set in @prefix_usage. */
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
69
70static bool
Jiri Pirko6b75c482016-07-04 08:23:09 +020071mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
72 struct mlxsw_sp_prefix_usage *prefix_usage2)
73{
74 unsigned char prefix;
75
76 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
77 if (!test_bit(prefix, prefix_usage2->b))
78 return false;
79 }
80 return true;
81}
82
83static bool
Jiri Pirko53342022016-07-04 08:23:08 +020084mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
85 struct mlxsw_sp_prefix_usage *prefix_usage2)
86{
87 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
88}
89
Jiri Pirko6b75c482016-07-04 08:23:09 +020090static bool
91mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
92{
93 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
94
95 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
96}
97
/* Copy usage map @prefix_usage2 into @prefix_usage1. */
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

/* Clear every prefix length from @prefix_usage. */
static void
mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	memset(prefix_usage, 0, sizeof(*prefix_usage));
}

/* Mark prefix length @prefix_len as used in @prefix_usage. */
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

/* Mark prefix length @prefix_len as unused in @prefix_usage. */
static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
124
/* Hash-table key of a FIB node: address (sized for IPv6) + prefix length. */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

/* Kind of a FIB entry as programmed to the device. Names suggest:
 * REMOTE routes via a nexthop group, LOCAL is directly reachable and
 * TRAP punts to the CPU — confirm against the entry-op code below.
 */
enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};
135
struct mlxsw_sp_nexthop_group;

/* One (address, prefix_len) within a virtual router; owns the list of
 * FIB entries sharing that prefix.
 */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* of mlxsw_sp_fib_entry::list */
	struct list_head list;		/* member of mlxsw_sp_fib::node_list */
	struct rhash_head ht_node;	/* member of mlxsw_sp_fib::ht */
	struct mlxsw_sp_vr *vr;		/* virtual router owning this node */
	struct mlxsw_sp_fib_key key;
};

/* Kernel route parameters distinguishing entries on the same node. */
struct mlxsw_sp_fib_entry_params {
	u32 tb_id;	/* kernel routing table id */
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;		/* member of fib_node->entry_list */
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;			/* currently programmed to the device */
};

/* Per-VR FIB: nodes live both in a hash table (lookup) and on a list
 * (traversal), plus per-prefix-length reference counts.
 */
struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200171
/* Allocate a FIB instance and initialize its node hash table and list.
 * Returns the FIB or ERR_PTR() on allocation/rhashtable failure; release
 * with mlxsw_sp_fib_destroy().
 */
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}
190
/* Free a FIB created by mlxsw_sp_fib_create(); it must hold no nodes. */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
197
Jiri Pirko53342022016-07-04 08:23:08 +0200198static struct mlxsw_sp_lpm_tree *
199mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
200{
201 static struct mlxsw_sp_lpm_tree *lpm_tree;
202 int i;
203
204 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
205 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
206 if (lpm_tree->ref_count == 0) {
207 if (one_reserved)
208 one_reserved = false;
209 else
210 return lpm_tree;
211 }
212 }
213 return NULL;
214}
215
/* Create (allocate) the given LPM tree in the device via RALTA. */
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
226
/* Destroy (deallocate) the given LPM tree in the device via RALTA. */
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
237
/* Program the tree's bin structure via RALST: the longest used prefix
 * becomes the root bin; the remaining used prefixes are chained in
 * increasing order, each linked to the previously written one.
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	/* The loop walks set bits in ascending order, so the last
	 * assignment leaves the highest used prefix in root_bin.
	 */
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
261
/* Reserve an unused tree slot, create the tree in the device and program
 * its structure for @prefix_usage. On success the tree's usage map is
 * recorded for later matching. Returns the tree or ERR_PTR().
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
290
/* Release the device resources of an LPM tree whose last reference is gone. */
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
296
/* Get an LPM tree for (@prefix_usage, @proto): reuse an in-use tree whose
 * usage map matches exactly, otherwise create a new one. A reference is
 * taken either way; drop it with mlxsw_sp_lpm_tree_put().
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto, bool one_reserved)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto, one_reserved);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}
322
323static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
324 struct mlxsw_sp_lpm_tree *lpm_tree)
325{
326 if (--lpm_tree->ref_count == 0)
327 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
328 return 0;
329}
330
331static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
332{
333 struct mlxsw_sp_lpm_tree *lpm_tree;
334 int i;
335
336 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
337 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
338 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
339 }
340}
341
Jiri Pirko6b75c482016-07-04 08:23:09 +0200342static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
343{
344 struct mlxsw_sp_vr *vr;
345 int i;
346
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200347 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200348 vr = &mlxsw_sp->router.vrs[i];
349 if (!vr->used)
350 return vr;
351 }
352 return NULL;
353}
354
/* Bind the VR to its current LPM tree in the device via RALTB. */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto,
			     vr->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
365
/* Detach the VR from its LPM tree via RALTB. */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_vr *vr)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, vr->id,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
376
377static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
378{
379 /* For our purpose, squash main and local table into one */
380 if (tb_id == RT_TABLE_LOCAL)
381 tb_id = RT_TABLE_MAIN;
382 return tb_id;
383}
384
385static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
386 u32 tb_id,
387 enum mlxsw_sp_l3proto proto)
388{
389 struct mlxsw_sp_vr *vr;
390 int i;
391
392 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200393
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200394 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200395 vr = &mlxsw_sp->router.vrs[i];
396 if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
397 return vr;
398 }
399 return NULL;
400}
401
/* Take a free VR slot and set it up: create its FIB, get an LPM tree
 * fitting @prefix_len and bind the VR to that tree.
 * Returns the VR or ERR_PTR() on failure.
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      unsigned char prefix_len,
					      u32 tb_id,
					      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib = mlxsw_sp_fib_create();
	if (IS_ERR(vr->fib))
		return ERR_CAST(vr->fib);

	vr->proto = proto;
	vr->tb_id = tb_id;
	mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
	/* one_reserved=true: presumably keeps one tree slot free for later
	 * replacement — see mlxsw_sp_lpm_tree_find_unused(); confirm.
	 */
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 proto, true);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_tree_get;
	}
	vr->lpm_tree = lpm_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
	if (err)
		goto err_tree_bind;

	vr->used = true;
	return vr;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
err_tree_get:
	mlxsw_sp_fib_destroy(vr->fib);

	return ERR_PTR(err);
}
444
/* Tear down a VR: unbind from its LPM tree first, then drop the tree
 * reference, destroy the FIB and return the slot to the free pool.
 */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
	mlxsw_sp_fib_destroy(vr->fib);
	vr->used = false;
}
453
/* Make sure the VR's LPM tree matches @req_prefix_usage. Keep the current
 * tree when it already matches, or when no new tree is available but the
 * current tree is a superset of the requirement; otherwise rebind the VR
 * to a freshly obtained tree and release the old one.
 */
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 vr->proto, false);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	vr->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	/* Restore the previous tree so the VR stays consistent. */
	vr->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
493
/* Find or create the VR for (tb_id, proto), making sure its LPM tree can
 * hold prefixes of @prefix_len; an existing VR may get its tree replaced.
 * Returns the VR or ERR_PTR().
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
					   unsigned char prefix_len,
					   u32 tb_id,
					   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_vr *vr;
	int err;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
	if (!vr) {
		vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
		if (IS_ERR(vr))
			return vr;
	} else {
		struct mlxsw_sp_prefix_usage req_prefix_usage;

		mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
					  &vr->fib->prefix_usage);
		mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
		/* Need to replace LPM tree in case new prefix is required. */
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
						 &req_prefix_usage);
		if (err)
			return ERR_PTR(err);
	}
	return vr;
}
522
/* Release a VR obtained via mlxsw_sp_vr_get(). */
static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	/* Destroy virtual router entity in case the associated FIB is empty
	 * and allow it to be used for other tables in future. Otherwise,
	 * check if some prefix usage did not disappear and change tree if
	 * that is the case. Note that in case new, smaller tree cannot be
	 * allocated, the original one will be kept being used.
	 */
	if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
	else
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
					   &vr->fib->prefix_usage);
}
537
/* Allocate the VR array sized by the device's MAX_VRS resource and give
 * each slot its index. Returns 0, -EIO when the resource is missing, or
 * -ENOMEM.
 */
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
				       GFP_KERNEL);
	if (!mlxsw_sp->router.vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		vr->id = i;
	}

	return 0;
}
560
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

/* Tear down the VR array; pending FIB work is drained first so nothing
 * touches the array after it is freed.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router.vrs);
}
576
/* Hash-table key: the tracked kernel neighbour. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* member of the RIF's neigh_list */
	struct rhash_head ht_node;	/* member of router.neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif;			/* RIF index resolved from n->dev */
	bool connected;			/* entry currently written to the device */
	unsigned char ha[ETH_ALEN];	/* cached hardware address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
599
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100600static struct mlxsw_sp_neigh_entry *
601mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
602 u16 rif)
603{
604 struct mlxsw_sp_neigh_entry *neigh_entry;
605
606 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
607 if (!neigh_entry)
608 return NULL;
609
610 neigh_entry->key.n = n;
611 neigh_entry->rif = rif;
612 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
613
614 return neigh_entry;
615}
616
/* Free an entry allocated by mlxsw_sp_neigh_entry_alloc(). */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
621
/* Insert the entry into the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

/* Remove the entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
639
/* Create and register a neighbour entry for @n: resolve the RIF from the
 * neighbour's netdev, allocate, insert into the hash table and link the
 * entry on the RIF's neighbour list. Returns the entry or ERR_PTR().
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *r;
	int err;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!r)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &r->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
667
/* Undo mlxsw_sp_neigh_entry_create(): unlink from the RIF list, remove
 * from the hash table and free.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
676
677static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +0100678mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200679{
Jiri Pirko33b13412016-11-10 12:31:04 +0100680 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200681
Jiri Pirko33b13412016-11-10 12:31:04 +0100682 key.n = n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200683 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
684 &key, mlxsw_sp_neigh_ht_params);
685}
686
/* Cache the ARP table's DELAY_PROBE_TIME (converted to msecs) as the
 * interval for the periodic neighbour activity dump.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
}
694
/* Handle one IPv4 entry of a RAUHTD activity dump: the entry names an
 * active neighbour, so poke the kernel's neighbour code to keep it from
 * aging out.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	/* The register reports the IP in host order; kernel lookup wants
	 * network order.
	 */
	dipn = htonl(dip);
	dev = mlxsw_sp->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
725
726static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
727 char *rauhtd_pl,
728 int rec_index)
729{
730 u8 num_entries;
731 int i;
732
733 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
734 rec_index);
735 /* Hardware starts counting at 0, so add 1. */
736 num_entries++;
737
738 /* Each record consists of several neighbour entries. */
739 for (i = 0; i < num_entries; i++) {
740 int ent_index;
741
742 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
743 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
744 ent_index);
745 }
746
747}
748
/* Dispatch one RAUHTD record by type. Only IPv4 is requested by the dump,
 * so an IPv6 record indicates a bug.
 */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
762
/* Decide whether the last RAUHTD response filled the whole buffer,
 * meaning another query round is needed to drain the activity table.
 */
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	/* A full IPv4 record holds the maximum entry count; the register
	 * reports the count zero-based, hence the pre-increment.
	 */
	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}
782
Yotam Gigib2157142016-07-05 11:27:51 +0200783static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
Yotam Gigic723c7352016-07-05 11:27:43 +0200784{
Yotam Gigic723c7352016-07-05 11:27:43 +0200785 char *rauhtd_pl;
786 u8 num_rec;
787 int i, err;
788
789 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
790 if (!rauhtd_pl)
Yotam Gigib2157142016-07-05 11:27:51 +0200791 return -ENOMEM;
Yotam Gigic723c7352016-07-05 11:27:43 +0200792
793 /* Make sure the neighbour's netdev isn't removed in the
794 * process.
795 */
796 rtnl_lock();
797 do {
798 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
799 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
800 rauhtd_pl);
801 if (err) {
802 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
803 break;
804 }
805 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
806 for (i = 0; i < num_rec; i++)
807 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
808 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100809 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +0200810 rtnl_unlock();
811
812 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +0200813 return err;
814}
815
/* Keep neighbours used by nexthops alive by making the kernel treat them
 * as active regardless of observed traffic.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
830
/* Re-arm the delayed work driving the periodic neighbour update. */
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router.neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
			       msecs_to_jiffies(interval));
}
839
/* Periodic work: dump hardware neighbour activity, refresh nexthop
 * neighbours, then reschedule itself.
 */
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.neighs_update.dw.work);
	int err;

	err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
}
854
/* Periodic work: ARP-probe nexthop neighbours that are still unresolved. */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.nexthop_probe_dw.work);

	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
879
/* Defined later in this file; used by the neighbour update path below. */
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);
884
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100885static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200886{
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100887 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
888 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
889}
890
/* Program (add or delete, per op) an IPv4 neighbour entry in the
 * device's RAUHT table, binding the neighbour's IP to its MAC on the
 * entry's RIF.
 * NOTE(review): the mlxsw_reg_write() return value is ignored here —
 * presumably a failed host-table write is tolerated; confirm.
 */
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	/* primary_key holds the IPv4 address in network byte order. */
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
904
905static void
906mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
907 struct mlxsw_sp_neigh_entry *neigh_entry,
908 bool adding)
909{
910 if (!adding && !neigh_entry->connected)
911 return;
912 neigh_entry->connected = adding;
913 if (neigh_entry->key.n->tbl == &arp_tbl)
914 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
915 mlxsw_sp_rauht_op(adding));
916 else
917 WARN_ON_ONCE(1);
918}
919
/* Deferred-work context for one NETEVENT_NEIGH_UPDATE notification. */
struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;	/* ref taken via neigh_clone(); dropped by the work */
};
925
/* Work handler for a neighbour update: snapshot the neighbour's state
 * under its own lock, then create/update/destroy the matching driver
 * entry under RTNL.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* Disconnected and unknown to us - nothing to do. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		/* Connected neighbour without a driver entry - create one. */
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* Entry ended up disconnected and unused - free it. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);	/* pairs with neigh_clone() in the notifier */
	kfree(neigh_work);
}
970
/* Netevent notifier (runs in atomic context): tracks ARP table polling
 * interval changes and defers neighbour updates to process context.
 * Returns NOTIFY_DONE on success or uninteresting events, NOTIFY_BAD on
 * allocation failure.
 */
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router.neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		/* Only IPv4 (ARP) neighbours are handled. */
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		/* GFP_ATOMIC: we may not sleep in a notifier. */
		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}
1034
/* Initialize neighbour tracking: the neigh hash table, the polling
 * interval, and the two periodic works (activity dump and unresolved
 * nexthop probing). Returns 0 or the rhashtable_init() error.
 */
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	/* Kick both works off immediately; each re-arms itself. */
	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
	return 0;
}
1058
1059static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1060{
Yotam Gigic723c7352016-07-05 11:27:43 +02001061 cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001062 cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001063 rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1064}
1065
Ido Schimmel9665b742017-02-08 11:16:42 +01001066static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
1067 const struct mlxsw_sp_rif *r)
1068{
1069 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1070
1071 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
1072 r->rif, r->addr);
1073 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1074}
1075
/* A RIF is going away: flush all of its neighbour entries from the
 * device, then destroy the matching driver entries.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
	/* _safe iterator: destroy presumably unlinks the entry from this
	 * list — confirm against mlxsw_sp_neigh_entry_destroy().
	 */
	list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
1086
/* Hash-table key for a nexthop: the kernel fib_nh it mirrors. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};
1090
/* Driver representation of a single kernel nexthop (fib_nh). */
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;	/* member of the RIF's nexthop list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;	/* node in router.nexthop_ht */
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *r;		/* egress RIF; NULL while unbound */
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};
1111
/* Hash-table key for a nexthop group: the kernel fib_info it mirrors. */
struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};
1115
/* A group of nexthops sharing one adjacency (KVD linear) area; mirrors
 * a kernel fib_info and is shared by all fib entries that use it.
 */
struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;	/* node in router.nexthop_group_ht */
	struct list_head fib_list; /* list of fib entries that use this group */
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;	/* base index of the group's KVD linear block */
	u16 ecmp_size;	/* number of offloaded (adjacency) entries */
	u16 count;	/* total nexthops in the trailing array */
	struct mlxsw_sp_nexthop nexthops[0];
/* Convenience alias: the RIF of a single-nexthop (local) group. */
#define nh_rif nexthops[0].r
};
1128
/* rhashtable layout for nexthop groups, keyed by fib_info pointer. */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};
1134
1135static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1136 struct mlxsw_sp_nexthop_group *nh_grp)
1137{
1138 return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
1139 &nh_grp->ht_node,
1140 mlxsw_sp_nexthop_group_ht_params);
1141}
1142
1143static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1144 struct mlxsw_sp_nexthop_group *nh_grp)
1145{
1146 rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
1147 &nh_grp->ht_node,
1148 mlxsw_sp_nexthop_group_ht_params);
1149}
1150
1151static struct mlxsw_sp_nexthop_group *
1152mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1153 struct mlxsw_sp_nexthop_group_key key)
1154{
1155 return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
1156 mlxsw_sp_nexthop_group_ht_params);
1157}
1158
/* rhashtable layout for individual nexthops, keyed by fib_nh pointer. */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};
1164
1165static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1166 struct mlxsw_sp_nexthop *nh)
1167{
1168 return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
1169 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1170}
1171
1172static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1173 struct mlxsw_sp_nexthop *nh)
1174{
1175 rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
1176 mlxsw_sp_nexthop_ht_params);
1177}
1178
Ido Schimmelad178c82017-02-08 11:16:40 +01001179static struct mlxsw_sp_nexthop *
1180mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1181 struct mlxsw_sp_nexthop_key key)
1182{
1183 return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
1184 mlxsw_sp_nexthop_ht_params);
1185}
1186
/* Tell the device (RALEU register) to rewrite all routes of one virtual
 * router that reference the old adjacency block so they reference the
 * new one instead. Returns the register-write result.
 */
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_vr *vr,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	/* vr->proto is cast to the register's shared ralxx protocol enum. */
	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
			     adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
1201
1202static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1203 struct mlxsw_sp_nexthop_group *nh_grp,
1204 u32 old_adj_index, u16 old_ecmp_size)
1205{
1206 struct mlxsw_sp_fib_entry *fib_entry;
1207 struct mlxsw_sp_vr *vr = NULL;
1208 int err;
1209
1210 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01001211 if (vr == fib_entry->fib_node->vr)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001212 continue;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001213 vr = fib_entry->fib_node->vr;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001214 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
1215 old_adj_index,
1216 old_ecmp_size,
1217 nh_grp->adj_index,
1218 nh_grp->ecmp_size);
1219 if (err)
1220 return err;
1221 }
1222 return 0;
1223}
1224
/* Write one adjacency (RATR) entry: the neighbour's MAC and egress RIF
 * at the given adjacency index. Returns the register-write result.
 */
static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
1236
1237static int
1238mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
Ido Schimmela59b7e02017-01-23 11:11:42 +01001239 struct mlxsw_sp_nexthop_group *nh_grp,
1240 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001241{
1242 u32 adj_index = nh_grp->adj_index; /* base */
1243 struct mlxsw_sp_nexthop *nh;
1244 int i;
1245 int err;
1246
1247 for (i = 0; i < nh_grp->count; i++) {
1248 nh = &nh_grp->nexthops[i];
1249
1250 if (!nh->should_offload) {
1251 nh->offloaded = 0;
1252 continue;
1253 }
1254
Ido Schimmela59b7e02017-01-23 11:11:42 +01001255 if (nh->update || reallocate) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001256 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1257 adj_index, nh);
1258 if (err)
1259 return err;
1260 nh->update = 0;
1261 nh->offloaded = 1;
1262 }
1263 adj_index++;
1264 }
1265 return 0;
1266}
1267
1268static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1269 struct mlxsw_sp_fib_entry *fib_entry);
1270
1271static int
1272mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1273 struct mlxsw_sp_nexthop_group *nh_grp)
1274{
1275 struct mlxsw_sp_fib_entry *fib_entry;
1276 int err;
1277
1278 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1279 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1280 if (err)
1281 return err;
1282 }
1283 return 0;
1284}
1285
/* Recompute and re-program the group's adjacency (ECMP) area after a
 * nexthop changed state: allocate a new KVD linear block when the set of
 * offloadable nexthops changed, migrate routes to it, and fall back to
 * trapping traffic to the CPU on any failure.
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int ret;
	int i;
	int err;

	if (!nh_grp->gateway) {
		/* Non-gateway group: no adjacency entries are needed, just
		 * refresh the fib entries using it.
		 */
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	/* Count offloadable nexthops and detect membership changes. */
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
	if (ret < 0) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	adj_index = ret;
	/* Switch the group to the new block, remembering the old one so
	 * routes can be migrated and the old block freed.
	 */
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	/* Fallback: mark nothing as offloaded, point all fib entries at
	 * the CPU trap and release the adjacency block if we held one.
	 */
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
1390
1391static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1392 bool removing)
1393{
1394 if (!removing && !nh->should_offload)
1395 nh->should_offload = 1;
1396 else if (removing && nh->offloaded)
1397 nh->should_offload = 0;
1398 nh->update = 1;
1399}
1400
1401static void
1402mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1403 struct mlxsw_sp_neigh_entry *neigh_entry,
1404 bool removing)
1405{
1406 struct mlxsw_sp_nexthop *nh;
1407
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001408 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1409 neigh_list_node) {
1410 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1411 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1412 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001413}
1414
Ido Schimmel9665b742017-02-08 11:16:42 +01001415static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
1416 struct mlxsw_sp_rif *r)
1417{
1418 if (nh->r)
1419 return;
1420
1421 nh->r = r;
1422 list_add(&nh->rif_list_node, &r->nexthop_list);
1423}
1424
1425static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1426{
1427 if (!nh->r)
1428 return;
1429
1430 list_del(&nh->rif_list_node);
1431 nh->r = NULL;
1432}
1433
/* Bind a nexthop to its neighbour entry, creating the kernel neighbour
 * and/or the driver entry on demand. No-op for non-gateway groups or an
 * already-bound nexthop. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct fib_nh *fib_nh = nh->key.fib_nh;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution of the freshly created neighbour. */
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router.nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	/* Snapshot the neighbour's state under its lock to seed the
	 * nexthop's offload wish.
	 */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}
1488
/* Undo mlxsw_sp_nexthop_neigh_init(): unlink the nexthop from its
 * neighbour entry, drop the entry when it became unused, and release
 * the neighbour reference. No-op if the nexthop is not bound.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	/* Disconnected and no longer referenced - free the entry. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	/* Pairs with the reference taken in mlxsw_sp_nexthop_neigh_init(). */
	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001514
Ido Schimmela8c97012017-02-08 11:16:35 +01001515static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1516 struct mlxsw_sp_nexthop_group *nh_grp,
1517 struct mlxsw_sp_nexthop *nh,
1518 struct fib_nh *fib_nh)
1519{
1520 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001521 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01001522 struct mlxsw_sp_rif *r;
1523 int err;
1524
1525 nh->nh_grp = nh_grp;
1526 nh->key.fib_nh = fib_nh;
1527 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1528 if (err)
1529 return err;
1530
Ido Schimmel97989ee2017-03-10 08:53:38 +01001531 if (!dev)
1532 return 0;
1533
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001534 in_dev = __in_dev_get_rtnl(dev);
1535 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1536 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1537 return 0;
1538
Ido Schimmela8c97012017-02-08 11:16:35 +01001539 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1540 if (!r)
1541 return 0;
Ido Schimmel9665b742017-02-08 11:16:42 +01001542 mlxsw_sp_nexthop_rif_init(nh, r);
Ido Schimmela8c97012017-02-08 11:16:35 +01001543
1544 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1545 if (err)
1546 goto err_nexthop_neigh_init;
1547
1548 return 0;
1549
1550err_nexthop_neigh_init:
1551 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1552 return err;
1553}
1554
/* Tear down a nexthop in reverse order of mlxsw_sp_nexthop_init():
 * neighbour unbind, RIF unbind, hash-table removal.
 */
static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
1562
/* Handle FIB nexthop add/del events: (un)bind the nexthop's RIF and
 * neighbour and refresh its group. Ignored once the router aborted.
 */
static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *r;

	if (mlxsw_sp->router.aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	/* Every mirrored fib_nh should be in the hash table. */
	if (WARN_ON_ONCE(!nh))
		return;

	/* Without a RIF on the egress device there is nothing to bind. */
	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!r)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, r);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
1595
/* A RIF is going away: detach every nexthop egressing through it and
 * refresh each affected group (those paths stop being offloaded).
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	/* _safe iterator: rif_fini unlinks nh from this very list. */
	list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1607
/* Allocate and initialize a nexthop group mirroring a kernel fib_info,
 * insert it into the group hash table and program its adjacency
 * entries. Returns the group or an ERR_PTR.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	/* Nexthops live in the trailing variable-length array. */
	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	/* The driver treats a first-nexthop scope of RT_SCOPE_LINK as a
	 * gatewayed group.
	 */
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	/* Unwind only the nexthops that were fully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
1649
/* Destroy a nexthop group: unhash it, tear down its nexthops, release
 * its adjacency area via the final refresh, and free it.
 */
static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	/* With all nexthops gone the refresh drops the KVD linear area;
	 * the WARN catches a leaked adjacency index.
	 */
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
1666
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001667static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1668 struct mlxsw_sp_fib_entry *fib_entry,
1669 struct fib_info *fi)
1670{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001671 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001672 struct mlxsw_sp_nexthop_group *nh_grp;
1673
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001674 key.fi = fi;
1675 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001676 if (!nh_grp) {
1677 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1678 if (IS_ERR(nh_grp))
1679 return PTR_ERR(nh_grp);
1680 }
1681 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1682 fib_entry->nh_group = nh_grp;
1683 return 0;
1684}
1685
1686static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1687 struct mlxsw_sp_fib_entry *fib_entry)
1688{
1689 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1690
1691 list_del(&fib_entry->nexthop_group_node);
1692 if (!list_empty(&nh_grp->fib_list))
1693 return;
1694 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1695}
1696
Ido Schimmel013b20f2017-02-08 11:16:36 +01001697static bool
1698mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1699{
1700 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1701
Ido Schimmel9aecce12017-02-09 10:28:42 +01001702 if (fib_entry->params.tos)
1703 return false;
1704
Ido Schimmel013b20f2017-02-08 11:16:36 +01001705 switch (fib_entry->type) {
1706 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1707 return !!nh_group->adj_index_valid;
1708 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001709 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001710 default:
1711 return false;
1712 }
1713}
1714
/* Mark @fib_entry as offloaded and reflect it in the kernel's fib_info
 * offload counter.
 */
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->vr->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 routes are not offloaded yet. */
		WARN_ON_ONCE(1);
	}
}
1727
/* Clear @fib_entry's offload indication and reflect it in the kernel's
 * fib_info offload counter.
 */
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->vr->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_dec(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 routes are not offloaded yet. */
		WARN_ON_ONCE(1);
	}

	fib_entry->offloaded = false;
}
1741
/* Synchronize fib_entry->offloaded (and the kernel's counters) with the
 * outcome of a RALUE operation: clear it on delete, and on a successful
 * write set or clear it according to whether the entry could actually
 * be offloaded.
 */
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		if (!fib_entry->offloaded)
			return;
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		/* A failed write leaves the previous indication intact. */
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
		    !fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
			 fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
1765
/* Write a remote (adjacency-forwarded) IPv4 route to the device through
 * the RALUE register.
 */
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1799
/* Write a local IPv4 route to the device: forward through the router
 * interface when offloadable, otherwise trap the traffic to the CPU.
 */
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
	u16 trap_id = 0;
	u16 rif = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif = r->rif;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1827
/* Write an IPv4 route with an ip2me action, directing matching packets
 * to the CPU.
 */
static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
			      vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1843
1844static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
1845 struct mlxsw_sp_fib_entry *fib_entry,
1846 enum mlxsw_reg_ralue_op op)
1847{
1848 switch (fib_entry->type) {
1849 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001850 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001851 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1852 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
1853 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
1854 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
1855 }
1856 return -EINVAL;
1857}
1858
1859static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1860 struct mlxsw_sp_fib_entry *fib_entry,
1861 enum mlxsw_reg_ralue_op op)
1862{
Ido Schimmel013b20f2017-02-08 11:16:36 +01001863 int err = -EINVAL;
1864
Ido Schimmel9aecce12017-02-09 10:28:42 +01001865 switch (fib_entry->fib_node->vr->proto) {
Jiri Pirko61c503f2016-07-04 08:23:11 +02001866 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel013b20f2017-02-08 11:16:36 +01001867 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
1868 break;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001869 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel013b20f2017-02-08 11:16:36 +01001870 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001871 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01001872 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
1873 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001874}
1875
1876static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1877 struct mlxsw_sp_fib_entry *fib_entry)
1878{
Jiri Pirko7146da32016-09-01 10:37:41 +02001879 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1880 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001881}
1882
1883static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
1884 struct mlxsw_sp_fib_entry *fib_entry)
1885{
1886 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1887 MLXSW_REG_RALUE_OP_WRITE_DELETE);
1888}
1889
/* Derive the device forwarding action (fib_entry->type) from the kernel
 * route type reported in @fen_info.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		/* Host-directed traffic must reach the CPU. */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		/* Only link-scope nexthops are candidates for adjacency
		 * forwarding (REMOTE); others are handled locally.
		 */
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
1921
Jiri Pirko5b004412016-09-01 10:37:40 +02001922static struct mlxsw_sp_fib_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01001923mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
1924 struct mlxsw_sp_fib_node *fib_node,
1925 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02001926{
1927 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001928 int err;
1929
1930 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
1931 if (!fib_entry) {
1932 err = -ENOMEM;
1933 goto err_fib_entry_alloc;
1934 }
1935
1936 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
1937 if (err)
1938 goto err_fib4_entry_type_set;
1939
1940 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
1941 if (err)
1942 goto err_nexthop_group_get;
1943
1944 fib_entry->params.prio = fen_info->fi->fib_priority;
1945 fib_entry->params.tb_id = fen_info->tb_id;
1946 fib_entry->params.type = fen_info->type;
1947 fib_entry->params.tos = fen_info->tos;
1948
1949 fib_entry->fib_node = fib_node;
1950
1951 return fib_entry;
1952
1953err_nexthop_group_get:
1954err_fib4_entry_type_set:
1955 kfree(fib_entry);
1956err_fib_entry_alloc:
1957 return ERR_PTR(err);
1958}
1959
/* Free a FIB entry, dropping its nexthop group reference first. */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}
1966
1967static struct mlxsw_sp_fib_node *
1968mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
1969 const struct fib_entry_notifier_info *fen_info);
1970
1971static struct mlxsw_sp_fib_entry *
1972mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
1973 const struct fib_entry_notifier_info *fen_info)
1974{
1975 struct mlxsw_sp_fib_entry *fib_entry;
1976 struct mlxsw_sp_fib_node *fib_node;
1977
1978 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
1979 if (IS_ERR(fib_node))
1980 return NULL;
1981
1982 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
1983 if (fib_entry->params.tb_id == fen_info->tb_id &&
1984 fib_entry->params.tos == fen_info->tos &&
1985 fib_entry->params.type == fen_info->type &&
1986 fib_entry->nh_group->key.fi == fen_info->fi) {
1987 return fib_entry;
1988 }
1989 }
1990
1991 return NULL;
1992}
1993
/* FIB node hash table, keyed by the full struct mlxsw_sp_fib_key
 * ({addr, prefix_len}); maps a prefix to its struct mlxsw_sp_fib_node.
 */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
2000
2001static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2002 struct mlxsw_sp_fib_node *fib_node)
2003{
2004 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2005 mlxsw_sp_fib_ht_params);
2006}
2007
2008static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2009 struct mlxsw_sp_fib_node *fib_node)
2010{
2011 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2012 mlxsw_sp_fib_ht_params);
2013}
2014
2015static struct mlxsw_sp_fib_node *
2016mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2017 size_t addr_len, unsigned char prefix_len)
2018{
2019 struct mlxsw_sp_fib_key key;
2020
2021 memset(&key, 0, sizeof(key));
2022 memcpy(key.addr, addr, addr_len);
2023 key.prefix_len = prefix_len;
2024 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2025}
2026
2027static struct mlxsw_sp_fib_node *
2028mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
2029 size_t addr_len, unsigned char prefix_len)
2030{
2031 struct mlxsw_sp_fib_node *fib_node;
2032
2033 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2034 if (!fib_node)
2035 return NULL;
2036
2037 INIT_LIST_HEAD(&fib_node->entry_list);
2038 list_add(&fib_node->list, &vr->fib->node_list);
2039 memcpy(fib_node->key.addr, addr, addr_len);
2040 fib_node->key.prefix_len = prefix_len;
2041 mlxsw_sp_fib_node_insert(vr->fib, fib_node);
2042 fib_node->vr = vr;
2043
2044 return fib_node;
2045}
2046
/* Counterpart of mlxsw_sp_fib_node_create(): unhash the node, unlink it
 * from the VR's node list and free it. The node must no longer hold any
 * entries.
 */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
2054
2055static bool
2056mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2057 const struct mlxsw_sp_fib_entry *fib_entry)
2058{
2059 return list_first_entry(&fib_node->entry_list,
2060 struct mlxsw_sp_fib_entry, list) == fib_entry;
2061}
2062
2063static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2064{
2065 unsigned char prefix_len = fib_node->key.prefix_len;
2066 struct mlxsw_sp_fib *fib = fib_node->vr->fib;
2067
2068 if (fib->prefix_ref_count[prefix_len]++ == 0)
2069 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2070}
2071
2072static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2073{
2074 unsigned char prefix_len = fib_node->key.prefix_len;
2075 struct mlxsw_sp_fib *fib = fib_node->vr->fib;
2076
2077 if (--fib->prefix_ref_count[prefix_len] == 0)
2078 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2079}
2080
/* Get (find or create) the FIB node for the prefix described by
 * @fen_info, binding the relevant virtual router first.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id,
			     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	/* NOTE(review): the VR obtained above is also kept on the
	 * lookup-hit path - verify the get/put balance against
	 * mlxsw_sp_fib4_node_put() callers.
	 */
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	return fib_node;

err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
2114
/* Release a node obtained from mlxsw_sp_fib4_node_get(). The node and
 * its virtual router binding are only freed once the node holds no
 * entries.
 */
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
2125
/* Find the entry a new entry with @params should be inserted before.
 * The node's list is ordered by table ID (descending), then TOS
 * (descending), then priority (ascending). Returns NULL when the new
 * entry belongs at the end of its table ID group or no match exists.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		if (fib_entry->params.tos > params->tos)
			continue;
		/* First entry with a lower TOS, or same TOS and a
		 * priority not lower than the new entry's.
		 */
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}
2146
/* Append @new_entry after all entries sharing @fib_entry's
 * {tb_id, tos, prio}. @fib_entry is the first entry of that group, as
 * returned by mlxsw_sp_fib4_node_entry_find(), and must exist for an
 * append to be valid.
 */
static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	/* Insert just before the first entry of the next group; if the
	 * loop completed, this inserts at the list's tail.
	 */
	list_add_tail(&new_entry->list, &fib_entry->list);
	return 0;
}
2166
/* Insert @new_entry into the node's ordered entry list. With @append it
 * is placed after its identical siblings; with @replace it is placed
 * right before the entry it replaces (which must exist). Otherwise the
 * entry is placed according to the {tb_id, tos, prio} ordering.
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		/* No insertion point found: place the new entry at the
		 * end of its table ID group (or at the list head when
		 * no entry with a lower or equal tb_id exists).
		 */
		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}
2203
2204static void
2205mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
2206{
2207 list_del(&fib_entry->list);
2208}
2209
/* Program @fib_entry to the device if it is the first - and therefore
 * the active - entry of its node; otherwise do nothing.
 */
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		/* The displaced entry is not deleted from the device;
		 * only its offload indication is cleared.
		 */
		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
2230
/* Remove @fib_entry from the device. If it was the node's programmed
 * (first) entry, promote the next entry in its place; if it was the
 * only entry, delete the route from the device entirely.
 */
static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		/* Fix the deleted entry's offload indication without
		 * touching the device again.
		 */
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
2251
2252static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002253 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002254 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002255{
2256 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2257 int err;
2258
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002259 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2260 append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002261 if (err)
2262 return err;
2263
2264 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2265 if (err)
2266 goto err_fib4_node_entry_add;
2267
2268 mlxsw_sp_fib_node_prefix_inc(fib_node);
2269
2270 return 0;
2271
2272err_fib4_node_entry_add:
2273 mlxsw_sp_fib4_node_list_remove(fib_entry);
2274 return err;
2275}
2276
/* Reverse of mlxsw_sp_fib4_node_entry_link(): drop the prefix length
 * reference, remove the entry from the device and unhook it from the
 * node's list.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib_node_prefix_dec(fib_node);
	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}
2287
/* After a FIB replace, dispose of the entry that was superseded.
 * mlxsw_sp_fib4_node_list_insert() placed the new entry right before
 * the replaced one. No-op unless @replace is set.
 */
static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
2305
/* Process an IPv4 route add/replace/append notification: get (or
 * create) the FIB node for the prefix, create the FIB entry, link it
 * into the node - programming the device if it became active - and, for
 * a replace, dispose of the entry it superseded.
 *
 * Returns 0 immediately when the router is in aborted state; routes are
 * then no longer offloaded.
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}
2348
/* Process an IPv4 route delete notification: unlink the matching entry
 * from its node (updating the device), destroy it and release the node.
 * No-op when the router is in aborted state.
 */
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router.aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002367
/* After a FIB abort, make the device pass all routed packets to the
 * CPU: allocate a minimal LPM tree, bind it to virtual router 0 and
 * install a default (0/0) IPv4 route with an ip2me action.
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	char raltb_pl[MLXSW_REG_RALTB_LEN];
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	int err;

	/* Allocate the smallest LPM tree. */
	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	/* Bind the tree to virtual router 0. */
	mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
	if (err)
		return err;

	/* Default route directing packets to the CPU. */
	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
2398
/* Destroy every FIB entry of @fib_node, releasing the node reference
 * each entry held; the node itself is freed when its last entry goes.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}
2418
2419static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2420 struct mlxsw_sp_fib_node *fib_node)
2421{
2422 switch (fib_node->vr->proto) {
2423 case MLXSW_SP_L3_PROTO_IPV4:
2424 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2425 break;
2426 case MLXSW_SP_L3_PROTO_IPV6:
2427 WARN_ON_ONCE(1);
2428 break;
2429 }
2430}
2431
/* Flush every FIB node of every used virtual router. */
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fib_node *fib_node, *tmp;
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];

		if (!vr->used)
			continue;

		list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
					 list) {
			bool do_break = &tmp->list == &vr->fib->node_list;

			mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
			/* Flushing may have freed the list's last node;
			 * stop before the iterator touches freed memory.
			 */
			if (do_break)
				break;
		}
	}
}
2454
/* Stop offloading routes: flush everything from the device, mark the
 * router as aborted and install a default trap so traffic keeps flowing
 * through the kernel. Idempotent.
 */
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
2468
/* Deferred-work context for a FIB notifier event. The notifier cannot
 * sleep (it allocates with GFP_ATOMIC), so the event and its info are
 * copied here and handled later by mlxsw_sp_router_fib_event_work().
 */
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		/* Which member is valid depends on 'event'. */
		struct fib_entry_notifier_info fen_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};
2478
/* Process-context handler for FIB notifier events queued by the FIB
 * notifier. A failure to offload a route triggers a full FIB abort.
 * References taken on fib_info structures by the notifier are dropped
 * here once the event has been handled.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		/* FIB rules are not offloaded; abort so routing stays
		 * correct through the kernel.
		 */
		mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
2519
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	/* Only offload routes from the initial network namespace. */
	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	/* Atomic context - copy the event and defer processing. */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	fib_work->mlxsw_sp = mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		/* Same lifetime guarantee for the nexthop's parent. */
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
2561
Ido Schimmel4724ba562017-03-10 08:53:39 +01002562static struct mlxsw_sp_rif *
2563mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2564 const struct net_device *dev)
2565{
2566 int i;
2567
2568 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2569 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
2570 return mlxsw_sp->rifs[i];
2571
2572 return NULL;
2573}
2574
/* Disable a RIF in hardware without destroying it: read the current
 * RITR configuration, clear the enable bit and write it back.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2588
/* The netdev behind @r is going away: disable the RIF first, then
 * flush the nexthops and neighbour entries that depend on it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *r)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
}
2596
2597static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2598 const struct in_device *in_dev,
2599 unsigned long event)
2600{
2601 switch (event) {
2602 case NETDEV_UP:
2603 if (!r)
2604 return true;
2605 return false;
2606 case NETDEV_DOWN:
2607 if (r && !in_dev->ifa_list)
2608 return true;
2609 /* It is possible we already removed the RIF ourselves
2610 * if it was assigned to a netdev that is now a bridge
2611 * or LAG slave.
2612 */
2613 return false;
2614 }
2615
2616 return false;
2617}
2618
2619#define MLXSW_SP_INVALID_RIF 0xffff
2620static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2621{
2622 int i;
2623
2624 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2625 if (!mlxsw_sp->rifs[i])
2626 return i;
2627
2628 return MLXSW_SP_INVALID_RIF;
2629}
2630
2631static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2632 bool *p_lagged, u16 *p_system_port)
2633{
2634 u8 local_port = mlxsw_sp_vport->local_port;
2635
2636 *p_lagged = mlxsw_sp_vport->lagged;
2637 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2638}
2639
2640static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
2641 struct net_device *l3_dev, u16 rif,
2642 bool create)
2643{
2644 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2645 bool lagged = mlxsw_sp_vport->lagged;
2646 char ritr_pl[MLXSW_REG_RITR_LEN];
2647 u16 system_port;
2648
2649 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
2650 l3_dev->mtu, l3_dev->dev_addr);
2651
2652 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2653 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2654 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2655
2656 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2657}
2658
2659static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2660
/* Map a Sub-port RIF index to its router FID (rFID). */
static u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
	return MLXSW_SP_RFID_BASE + rif;
}
2665
2666static struct mlxsw_sp_fid *
2667mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2668{
2669 struct mlxsw_sp_fid *f;
2670
2671 f = kzalloc(sizeof(*f), GFP_KERNEL);
2672 if (!f)
2673 return NULL;
2674
2675 f->leave = mlxsw_sp_vport_rif_sp_leave;
2676 f->ref_count = 0;
2677 f->dev = l3_dev;
2678 f->fid = fid;
2679
2680 return f;
2681}
2682
2683static struct mlxsw_sp_rif *
2684mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
2685{
2686 struct mlxsw_sp_rif *r;
2687
2688 r = kzalloc(sizeof(*r), GFP_KERNEL);
2689 if (!r)
2690 return NULL;
2691
2692 INIT_LIST_HEAD(&r->nexthop_list);
2693 INIT_LIST_HEAD(&r->neigh_list);
2694 ether_addr_copy(r->addr, l3_dev->dev_addr);
2695 r->mtu = l3_dev->mtu;
2696 r->dev = l3_dev;
2697 r->rif = rif;
2698 r->f = f;
2699
2700 return r;
2701}
2702
/* Create a Sub-port RIF for @l3_dev bound to @mlxsw_sp_vport: reserve
 * a RIF index, program the RIF in hardware, install the router FDB
 * entry for the netdev's MAC, then allocate the backing rFID and RIF
 * structures. Returns the new RIF or ERR_PTR(); unwound in reverse
 * order on failure.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}
2751
/* Tear down a Sub-port RIF in the reverse order of
 * mlxsw_sp_vport_rif_sp_create(). The fields of @r are copied to
 * locals up-front because @r (and its FID) are freed midway through.
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	/* Disable the RIF and flush dependent nexthops/neighbours. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}
2774
2775static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
2776 struct net_device *l3_dev)
2777{
2778 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2779 struct mlxsw_sp_rif *r;
2780
2781 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
2782 if (!r) {
2783 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
2784 if (IS_ERR(r))
2785 return PTR_ERR(r);
2786 }
2787
2788 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
2789 r->f->ref_count++;
2790
2791 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
2792
2793 return 0;
2794}
2795
2796static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
2797{
2798 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
2799
2800 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
2801
2802 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
2803 if (--f->ref_count == 0)
2804 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
2805}
2806
/* Handle an inetaddr event for the vPort identified by (@port_dev,
 * @vid): join the L3 device's RIF on NETDEV_UP, leave it on
 * NETDEV_DOWN.
 */
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}
2828
/* Inetaddr event on a physical port: handled through its VID 1 vPort.
 * Ports enslaved to a bridge or a LAG do not get a RIF of their own.
 */
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev))
		return 0;
	if (netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}
2837
2838static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
2839 struct net_device *lag_dev,
2840 unsigned long event, u16 vid)
2841{
2842 struct net_device *port_dev;
2843 struct list_head *iter;
2844 int err;
2845
2846 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
2847 if (mlxsw_sp_port_dev_check(port_dev)) {
2848 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
2849 event, vid);
2850 if (err)
2851 return err;
2852 }
2853 }
2854
2855 return 0;
2856}
2857
/* Inetaddr event on a LAG master: handled via its member ports with
 * VID 1, unless the LAG itself is enslaved to a bridge.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	return netif_is_bridge_port(lag_dev) ?
	       0 : __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
2866
2867static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2868 struct net_device *l3_dev)
2869{
2870 u16 fid;
2871
2872 if (is_vlan_dev(l3_dev))
2873 fid = vlan_dev_vlan_id(l3_dev);
2874 else if (mlxsw_sp->master_bridge.dev == l3_dev)
2875 fid = 1;
2876 else
2877 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2878
2879 return mlxsw_sp_fid_find(mlxsw_sp, fid);
2880}
2881
2882static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
2883{
2884 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
2885 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2886}
2887
2888static u16 mlxsw_sp_flood_table_index_get(u16 fid)
2889{
2890 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
2891}
2892
/* Add (@set) or remove the router port from the broadcast flood table
 * of @fid, so flooded traffic in the FID does (not) reach the router.
 */
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	/* SFTR register payload is heap-allocated (MLXSW_REG_SFTR_LEN
	 * bytes); presumably too large for the stack.
	 */
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
			    1, MLXSW_PORT_ROUTER_PORT, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}
2914
2915static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2916{
2917 if (mlxsw_sp_fid_is_vfid(fid))
2918 return MLXSW_REG_RITR_FID_IF;
2919 else
2920 return MLXSW_REG_RITR_VLAN_IF;
2921}
2922
/* Create or destroy (per @create) a FID/VLAN router interface for the
 * bridge device @l3_dev by packing and writing the RITR register.
 */
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2938
/* Create a router interface for the bridge FID @f on @l3_dev: reserve
 * a RIF index, flood the FID's broadcast traffic to the router port,
 * program the RIF and the router FDB entry, then allocate the software
 * RIF. Unwound in reverse order on failure.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return -ERANGE;

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
	return err;
}
2984
/* Destroy a bridge RIF in the reverse order of
 * mlxsw_sp_rif_bridge_create(). The fields of @r are copied to locals
 * up-front because @r is freed midway through.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	/* Disable the RIF and flush dependent nexthops/neighbours. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
3007
/* Handle an inetaddr event on a bridge (or a VLAN device on top of
 * one): create or destroy the corresponding bridge RIF.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}
3033
/* Dispatch an inetaddr event on a VLAN device based on its real
 * device: a switch port, a LAG master, or the VLAN-aware master
 * bridge. Other real devices are ignored.
 */
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}
3054
/* inetaddr notifier: configure or remove a router interface (RIF) for
 * @dev on IPv4 address addition/removal. Whether any action is needed
 * is decided by mlxsw_sp_rif_should_config().
 */
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	/* Bail if the netdev is not related to this driver. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, ifa->ifa_dev, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}
3084
/* Update the MAC address and MTU of RIF @rif via a read-modify-write
 * of the RITR register.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
3101
/* A netdev backing a RIF changed its MAC address or MTU: re-program
 * the RIF and its router FDB entry. On failure the previous FDB entry
 * and RIF configuration are restored.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	/* Nothing to do for netdevs without a RIF. */
	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	/* Remove the FDB entry for the old MAC before editing the RIF. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Cache the new attributes for future edits and rollbacks. */
	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
3141
/* Callback passed to register_fib_notifier(); presumably invoked by
 * the FIB notifier core before it re-requests a FIB dump - confirm
 * against the notifier core.
 */
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
}
3153
/* Allocate the RIF array and enable the router in hardware via the
 * RGCR register. Requires the MAX_RIFS resource to be valid.
 */
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}
3181
/* Disable the router in hardware and free the RIF array. All RIFs are
 * expected to have been destroyed by now; leftovers trigger a warning.
 */
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int i;

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);

	kfree(mlxsw_sp->rifs);
}
3195
/* Initialize the router subsystem: base hardware setup, nexthop hash
 * tables, LPM trees, virtual routers, the neighbour subsystem and
 * finally the FIB notifier. Unwound in reverse order on failure.
 */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	mlxsw_sp_lpm_init(mlxsw_sp);
	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	/* Register last, so FIB events only arrive once everything
	 * above is ready to handle them.
	 */
	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
err_nexthop_ht_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}
3244
/* Tear the router subsystem down in the reverse order of
 * mlxsw_sp_router_init().
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
}