blob: 28f7f54c76f9a962d07d885dcb735778eaf7b507 [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020039#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020042#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010043#include <linux/inetdevice.h>
Ido Schimmel9db032b2017-03-16 09:08:17 +010044#include <linux/netdevice.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020045#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020046#include <net/neighbour.h>
47#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020048#include <net/ip_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010049#include <net/fib_rules.h>
Ido Schimmel57837882017-03-16 09:08:16 +010050#include <net/l3mdev.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020051
52#include "spectrum.h"
53#include "core.h"
54#include "reg.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020055#include "spectrum_cnt.h"
56#include "spectrum_dpipe.h"
57#include "spectrum_router.h"
Ido Schimmel464dce12016-07-02 11:00:15 +020058
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;

/* Per-ASIC router state: virtual router table, neighbour and nexthop
 * hash tables, the LPM tree pool and the periodic update works.
 */
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_vr *vrs;		/* array sized by MAX_VRS resource */
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees; /* pool; IDs start at MLXSW_SP_LPM_TREE_MIN */
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;	/* NOTE(review): set on FIB offload failure elsewhere in
			 * this file — confirm against full source.
			 */
};
81
/* Router interface (RIF): L3 interface instantiated on top of a netdev. */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops egressing via this RIF */
	struct list_head neigh_list;	/* neigh entries resolved on this RIF */
	struct net_device *dev;
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;			/* HW RIF index */
	u16 vr_id;			/* virtual router this RIF is member of */
	/* Optional HW packet counters, one per direction; the *_valid flags
	 * track whether the corresponding index was allocated.
	 */
	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};
96
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020097static unsigned int *
98mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
99 enum mlxsw_sp_rif_counter_dir dir)
100{
101 switch (dir) {
102 case MLXSW_SP_RIF_COUNTER_EGRESS:
103 return &rif->counter_egress;
104 case MLXSW_SP_RIF_COUNTER_INGRESS:
105 return &rif->counter_ingress;
106 }
107 return NULL;
108}
109
110static bool
111mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
112 enum mlxsw_sp_rif_counter_dir dir)
113{
114 switch (dir) {
115 case MLXSW_SP_RIF_COUNTER_EGRESS:
116 return rif->counter_egress_valid;
117 case MLXSW_SP_RIF_COUNTER_INGRESS:
118 return rif->counter_ingress_valid;
119 }
120 return false;
121}
122
123static void
124mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
125 enum mlxsw_sp_rif_counter_dir dir,
126 bool valid)
127{
128 switch (dir) {
129 case MLXSW_SP_RIF_COUNTER_EGRESS:
130 rif->counter_egress_valid = valid;
131 break;
132 case MLXSW_SP_RIF_COUNTER_INGRESS:
133 rif->counter_ingress_valid = valid;
134 break;
135 }
136}
137
/* Bind (@enable=true) or unbind a flow counter to a RIF in the given
 * direction. Performs a read-modify-write of the RITR register so the
 * other RIF attributes are preserved.
 */
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	/* Read current RITR contents before patching in the counter fields. */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
157
/* Read the current good-unicast packet count of the RIF counter in
 * direction @dir into @cnt. Returns -EINVAL if no counter is allocated
 * for that direction, otherwise the register access result.
 */
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	/* NOP opcode: read without clearing the HW counter. */
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
182
/* Zero the HW counter at @counter_index via the RICNT CLEAR opcode. */
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
192
/* Allocate a counter from the RIF sub-pool, clear it and bind it to
 * @rif in direction @dir. On success the direction is marked valid.
 * On any failure the counter is returned to the pool.
 */
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	/* Start from zero so readouts reflect traffic since binding. */
	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
225
/* Unbind the RIF counter for direction @dir, return it to the pool and
 * mark the direction invalid. Inverse of mlxsw_sp_rif_counter_alloc().
 */
void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
241
Ido Schimmel4724ba562017-03-10 08:53:39 +0100242static struct mlxsw_sp_rif *
243mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
244 const struct net_device *dev);
245
/* Number of possible prefix lengths; sized for IPv6 (128 bits) so the
 * same bitmap serves both address families.
 */
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

/* Bitmap of prefix lengths in use; bit N set means /N routes exist. */
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

/* Iterate over every prefix length set in @prefix_usage. */
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
254
/* Return true if every prefix length in @prefix_usage1 also appears in
 * @prefix_usage2 (i.e. usage1 is a subset of usage2).
 */
static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}
267
/* Return true if the two prefix-usage bitmaps are identical. */
static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
274
/* Return true if no prefix length is in use (empty bitmap). */
static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}
282
/* Copy @prefix_usage2 into @prefix_usage1. */
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
289
/* Mark prefix length @prefix_len as used. */
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}
296
/* Mark prefix length @prefix_len as unused. */
static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
303
/* Hash key of a FIB node: address (large enough for IPv6) + prefix length. */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,	/* forwarded via nexthop(s) */
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,	/* locally terminated */
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,	/* punted to the CPU */
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

/* One prefix in a FIB; holds the list of entries sharing that prefix. */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* mlxsw_sp_fib_entry::list */
	struct list_head list;		/* member of mlxsw_sp_fib::node_list */
	struct rhash_head ht_node;	/* member of mlxsw_sp_fib::ht */
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

/* Kernel route attributes used to match/order entries within a node. */
struct mlxsw_sp_fib_entry_params {
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;		/* member of fib_node::entry_list */
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;			/* reflected to HW */
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

/* A HW LPM tree; shared between FIBs with identical prefix usage. */
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

/* Per-protocol FIB of a virtual router. */
struct mlxsw_sp_fib {
	struct rhashtable ht;		/* fib nodes keyed by mlxsw_sp_fib_key */
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	/* Number of routes per prefix length; drives prefix_usage. */
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;	/* non-NULL iff the VR is in use */
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200373
/* Allocate and initialize a FIB for virtual router @vr and protocol
 * @proto. Returns the FIB or an ERR_PTR on allocation/init failure.
 */
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}
395
/* Free a FIB; it must already be empty and unbound from any LPM tree. */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
403
Jiri Pirko53342022016-07-04 08:23:08 +0200404static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100405mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200406{
407 static struct mlxsw_sp_lpm_tree *lpm_tree;
408 int i;
409
Ido Schimmel9011b672017-05-16 19:38:25 +0200410 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
411 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100412 if (lpm_tree->ref_count == 0)
413 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200414 }
415 return NULL;
416}
417
/* Allocate tree @lpm_tree->id in HW via the RALTA register. */
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
428
/* Release tree @lpm_tree->id in HW via the RALTA register. */
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
439
/* Program the tree structure (RALST): chain the used prefix lengths as
 * a left-leaning list, with the longest used prefix as the root bin.
 * for_each_set_bit iterates ascending, so root_bin ends up holding the
 * largest set prefix length.
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
463
/* Take an unused tree from the pool, allocate it in HW and program its
 * structure from @prefix_usage. Returns the tree or an ERR_PTR
 * (-EBUSY when the pool is exhausted). The HW tree is freed on failure.
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	/* Remember the usage so mlxsw_sp_lpm_tree_get() can share trees. */
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
492
/* Destroy a tree; currently only the HW allocation needs undoing. */
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
498
/* Get a tree matching @prefix_usage and @proto: reuse an existing tree
 * with identical usage if one exists, otherwise create a new one.
 * Takes a reference in either case.
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}
524
525static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
526 struct mlxsw_sp_lpm_tree *lpm_tree)
527{
528 if (--lpm_tree->ref_count == 0)
529 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
530 return 0;
531}
532
#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */

/* Allocate the LPM tree pool, sized from the MAX_LPM_TREES resource
 * minus the reserved trees, and assign each pool slot its HW tree ID.
 */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}
559
/* Free the LPM tree pool allocated by mlxsw_sp_lpm_init(). */
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}
564
Ido Schimmel76610eb2017-03-10 08:53:41 +0100565static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
566{
567 return !!vr->fib4;
568}
569
Jiri Pirko6b75c482016-07-04 08:23:09 +0200570static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
571{
572 struct mlxsw_sp_vr *vr;
573 int i;
574
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200575 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200576 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100577 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200578 return vr;
579 }
580 return NULL;
581}
582
/* Bind @fib's virtual router to its LPM tree via the RALTB register. */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
593
/* Unbind @fib's virtual router from its LPM tree by rebinding it to
 * tree 0, the default.
 */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
604
605static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
606{
607 /* For our purpose, squash main and local table into one */
608 if (tb_id == RT_TABLE_LOCAL)
609 tb_id = RT_TABLE_MAIN;
610 return tb_id;
611}
612
/* Find the in-use virtual router backing kernel table @tb_id (local
 * and main tables are squashed together), or NULL if none exists.
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}
628
/* Return the FIB of @vr for protocol @proto. IPv6 is not supported
 * here yet, hence the BUG_ON.
 */
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}
640
641static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
642 u32 tb_id)
643{
Jiri Pirko6b75c482016-07-04 08:23:09 +0200644 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200645
646 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
647 if (!vr)
648 return ERR_PTR(-EBUSY);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100649 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
650 if (IS_ERR(vr->fib4))
651 return ERR_CAST(vr->fib4);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200652 vr->tb_id = tb_id;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200653 return vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200654}
655
/* Release a virtual router: destroy its FIB and mark it unused
 * (mlxsw_sp_vr_is_used() keys off fib4 being NULL).
 */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
661
/* Make sure @fib is bound to an LPM tree that covers
 * @req_prefix_usage. If the current tree does not match exactly, try to
 * get a matching tree and rebind; if no tree can be obtained, the
 * current tree is still acceptable when the requirement is a subset of
 * its usage. The old binding stays in place until the new tree is bound,
 * and is restored on bind failure.
 */
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
701
Ido Schimmel76610eb2017-03-10 08:53:41 +0100702static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200703{
704 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200705
706 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100707 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
708 if (!vr)
709 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200710 return vr;
711}
712
/* Release a VR reference: destroy the VR once no RIFs are members and
 * its FIB holds no nodes.
 */
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}
718
/* Allocate the virtual router array, sized from the MAX_VRS resource,
 * and assign each slot its VR ID.
 */
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}
741
Ido Schimmelac571de2016-11-14 11:26:32 +0100742static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
743
/* Tear down the virtual routers: drain queued FIB work, flush the
 * device tables and free the VR array.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
757
/* Hash key of a neighbour entry: the kernel neighbour it mirrors. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

/* Device-side mirror of a kernel neighbour entry. */
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* member of rif::neigh_list */
	struct rhash_head ht_node;	/* member of router::neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif;			/* RIF index the neighbour lives on */
	bool connected;			/* reachable (programmed to HW) */
	unsigned char ha[ETH_ALEN];	/* cached hardware address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
780
/* Allocate and minimally initialize a neighbour entry for kernel
 * neighbour @n on RIF index @rif. Returns NULL on allocation failure.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

/* Free a neighbour entry allocated by mlxsw_sp_neigh_entry_alloc(). */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
802
/* Add @neigh_entry to the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
811
/* Remove @neigh_entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
820
/* Create a neighbour entry for kernel neighbour @n: resolve its egress
 * RIF from n->dev, allocate the entry, insert it into the hashtable and
 * link it on the RIF's neighbour list.
 *
 * Returns the new entry or an ERR_PTR: -EINVAL if no RIF exists for the
 * netdev, -ENOMEM on allocation failure, or the insert error.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
848
/* Tear down a neighbour entry: unlink from the RIF list and hashtable,
 * then free it. Mirrors mlxsw_sp_neigh_entry_create() in reverse order.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
857
/* Look up the driver entry tracking kernel neighbour @n, or NULL. */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}
867
/* Seed the neighbour-activity polling interval (in msecs) from the ARP
 * table's default DELAY_PROBE_TIME; later updated on
 * NETEVENT_DELAY_PROBE_TIME_UPDATE.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
875
/* Handle a single IPv4 entry from a RAUHTD activity dump: find the
 * matching kernel neighbour on the entry's RIF netdev and poke it with
 * neigh_event_send() so the kernel sees the HW-observed activity.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	/* neigh_lookup() expects the key in network byte order. */
	dipn = htonl(dip);
	dev = mlxsw_sp->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	/* Drop the reference taken by neigh_lookup(). */
	neigh_release(n);
}
906
/* Process every IPv4 neighbour entry packed into one RAUHTD record. */
static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}

}
929
/* Dispatch one RAUHTD dump record by its type. Only IPv4 records are
 * expected here (the dump is packed with MLXSW_REG_RAUHTD_TYPE_IPV4);
 * an IPv6 record indicates a logic error, hence the WARN.
 */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
943
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100944static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
945{
946 u8 num_rec, last_rec_index, num_entries;
947
948 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
949 last_rec_index = num_rec - 1;
950
951 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
952 return false;
953 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
954 MLXSW_REG_RAUHTD_TYPE_IPV6)
955 return true;
956
957 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
958 last_rec_index);
959 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
960 return true;
961 return false;
962}
963
Yotam Gigib2157142016-07-05 11:27:51 +0200964static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
Yotam Gigic723c7352016-07-05 11:27:43 +0200965{
Yotam Gigic723c7352016-07-05 11:27:43 +0200966 char *rauhtd_pl;
967 u8 num_rec;
968 int i, err;
969
970 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
971 if (!rauhtd_pl)
Yotam Gigib2157142016-07-05 11:27:51 +0200972 return -ENOMEM;
Yotam Gigic723c7352016-07-05 11:27:43 +0200973
974 /* Make sure the neighbour's netdev isn't removed in the
975 * process.
976 */
977 rtnl_lock();
978 do {
979 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
980 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
981 rauhtd_pl);
982 if (err) {
983 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
984 break;
985 }
986 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
987 for (i = 0; i < num_rec; i++)
988 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
989 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100990 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +0200991 rtnl_unlock();
992
993 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +0200994 return err;
995}
996
/* Keep neighbours that back nexthops alive: poke every neighbour on the
 * nexthop_neighs_list so the kernel treats it as active even without
 * CPU-visible traffic.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think this
		 * neigh is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
1011
/* Re-arm the neighbour activity delayed work using the current polling
 * interval (kept in msecs, converted to jiffies here).
 */
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}
1020
1021static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1022{
Ido Schimmel9011b672017-05-16 19:38:25 +02001023 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001024 int err;
1025
Ido Schimmel9011b672017-05-16 19:38:25 +02001026 router = container_of(work, struct mlxsw_sp_router,
1027 neighs_update.dw.work);
1028 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001029 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001030 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02001031
Ido Schimmel9011b672017-05-16 19:38:25 +02001032 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001033
Ido Schimmel9011b672017-05-16 19:38:25 +02001034 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001035}
1036
/* Periodic work: actively probe unresolved nexthop neighbours. */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
1062
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001063static void
1064mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1065 struct mlxsw_sp_neigh_entry *neigh_entry,
1066 bool removing);
1067
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001068static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001069{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001070 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1071 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1072}
1073
/* Program (add or delete, per @op) an IPv4 host entry in the device's
 * unicast host table (RAUHT) from the driver's neighbour entry state.
 */
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	/* primary_key holds the IPv4 address in network byte order. */
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
1087
/* Sync the entry's connected state to hardware: add the host entry when
 * @adding, remove it otherwise. Removing an entry that was never
 * connected is a no-op. Only ARP (IPv4) neighbours are supported here.
 */
static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl == &arp_tbl)
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	else
		WARN_ON_ONCE(1);
}
1102
/* Context handed from the atomic netevent notifier to process context;
 * carries a cloned reference to the kernel neighbour (released by the
 * work function).
 */
struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};
1108
/* Process-context handler for NETEVENT_NEIGH_UPDATE: snapshot the
 * neighbour's state under its lock, then create/update/destroy the
 * driver entry and refresh dependent nexthops under RTNL.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* Disconnected and not tracked - nothing to do. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* Drop entries that are disconnected and no longer referenced by
	 * any nexthop.
	 */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	/* Drop the reference taken by neigh_clone() in the notifier. */
	neigh_release(n);
	kfree(neigh_work);
}
1153
/* Netevent notifier: runs in atomic context, so heavyweight handling is
 * deferred to a work item. Handles polling-interval updates and ARP
 * neighbour state changes for ports owned by this driver.
 */
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		/* GFP_ATOMIC: notifier context must not sleep. */
		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}
1217
/* Initialize neighbour tracking: the hashtable, the activity polling
 * interval, and the two periodic works (activity dump and unresolved
 * nexthop probing), both kicked off immediately.
 */
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}
1241
/* Reverse of mlxsw_sp_neigh_init(): stop both periodic works (waiting
 * for any in-flight run) and destroy the neighbour hashtable.
 */
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
1248
/* Ask the device to delete all host entries associated with @rif. */
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_rif *rif)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
			     rif->rif_index, rif->addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
1258
/* A RIF is going away: flush its host entries from hardware and destroy
 * every driver neighbour entry linked on the RIF's neigh_list.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	/* _safe variant: destroy unlinks entries while we walk the list. */
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
1269
/* Hashtable key for a nexthop: the kernel FIB nexthop it mirrors. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};
1273
/* Driver state for a single nexthop within a nexthop group. */
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node; /* member of the RIF's nexthop list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node; /* membership in the nexthop hashtable */
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *rif; /* egress RIF; NULL while uninitialized */
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};
1294
/* Hashtable key for a nexthop group: the kernel fib_info it serves. */
struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};
1298
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001299struct mlxsw_sp_nexthop_group {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001300 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001301 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001302 struct mlxsw_sp_nexthop_group_key key;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001303 u8 adj_index_valid:1,
1304 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001305 u32 adj_index;
1306 u16 ecmp_size;
1307 u16 count;
1308 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001309#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001310};
1311
/* rhashtable parameters for the nexthop group table, keyed by fib_info. */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};
1317
/* Add a nexthop group to the router's group hashtable. */
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}
1325
/* Remove a nexthop group from the router's group hashtable. */
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}
1333
/* Find the nexthop group created for @key's fib_info, or NULL. */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}
1341
/* rhashtable parameters for the nexthop table, keyed by kernel fib_nh. */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};
1347
/* Add a nexthop to the router's nexthop hashtable. */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
1354
/* Remove a nexthop from the router's nexthop hashtable. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}
1361
/* Find the driver nexthop tracking @key's fib_nh, or NULL. */
static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
1369
/* Atomically repoint, within one virtual router, every route using the
 * old adjacency block (adj_index/ecmp_size) to the new block, via the
 * RALEU register.
 */
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
1384
/* Repoint all FIB entries of a nexthop group from the old adjacency
 * block to the group's current one, issuing one mass-update per distinct
 * FIB (virtual router / protocol) found on the group's fib_list.
 */
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		/* Skip consecutive entries that share the previous FIB;
		 * that FIB was already mass-updated.
		 */
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}
1407
/* Write one adjacency entry (RATR): the nexthop neighbour's MAC and RIF
 * at the given adjacency index.
 */
static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
1419
/* Walk the group's nexthops and write adjacency entries for those that
 * should be offloaded. Offloadable nexthops occupy consecutive indexes
 * starting at the group's base adj_index; @reallocate forces a rewrite
 * of every entry (used right after a new adjacency block is allocated).
 */
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		/* Non-offloadable nexthops consume no adjacency index. */
		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}
1450
1451static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1452 struct mlxsw_sp_fib_entry *fib_entry);
1453
/* Re-program every FIB entry that uses this nexthop group; stops and
 * returns the first error encountered.
 */
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}
1468
/* Re-evaluate a nexthop group after membership/connectivity changed:
 * either refresh MACs in place (same offload set), or allocate a new
 * KVD adjacency block sized to the connected nexthops, switch routes
 * over to it and free the old block. On any failure, fall back to
 * trapping the group's traffic to the CPU.
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	/* Gateway-less groups use no adjacency entries; just resync the
	 * FIB entries.
	 */
	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
1571
1572static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1573 bool removing)
1574{
1575 if (!removing && !nh->should_offload)
1576 nh->should_offload = 1;
1577 else if (removing && nh->offloaded)
1578 nh->should_offload = 0;
1579 nh->update = 1;
1580}
1581
1582static void
1583mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1584 struct mlxsw_sp_neigh_entry *neigh_entry,
1585 bool removing)
1586{
1587 struct mlxsw_sp_nexthop *nh;
1588
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001589 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1590 neigh_list_node) {
1591 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1592 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1593 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001594}
1595
Ido Schimmel9665b742017-02-08 11:16:42 +01001596static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001597 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001598{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001599 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001600 return;
1601
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001602 nh->rif = rif;
1603 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001604}
1605
1606static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1607{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001608 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001609 return;
1610
1611 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001612 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01001613}
1614
/* Resolve and attach the neighbour used by a gateway nexthop.
 * Takes a reference on the neighbour and links the nexthop to the
 * corresponding mlxsw_sp_neigh_entry (creating one if needed).
 * No-op (returns 0) for non-gateway groups or already-attached nexthops.
 */
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct fib_nh *fib_nh = nh->key.fib_nh;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution of the freshly created neighbour. */
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			/* NOTE(review): create error is flattened to -EINVAL */
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	/* Snapshot neighbour state under its lock; offload only if the
	 * neighbour is valid and not dead.
	 */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}
1669
/* Detach a nexthop from its neighbour entry, undoing
 * mlxsw_sp_nexthop_neigh_init(), and drop the neighbour reference taken
 * there. No-op if the nexthop has no neighbour entry.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	/* Mark the nexthop as not offloadable before unlinking it. */
	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	/* A disconnected neigh entry is kept alive only by its nexthops. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001695
Ido Schimmela8c97012017-02-08 11:16:35 +01001696static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1697 struct mlxsw_sp_nexthop_group *nh_grp,
1698 struct mlxsw_sp_nexthop *nh,
1699 struct fib_nh *fib_nh)
1700{
1701 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001702 struct in_device *in_dev;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001703 struct mlxsw_sp_rif *rif;
Ido Schimmela8c97012017-02-08 11:16:35 +01001704 int err;
1705
1706 nh->nh_grp = nh_grp;
1707 nh->key.fib_nh = fib_nh;
1708 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1709 if (err)
1710 return err;
1711
Ido Schimmel97989ee2017-03-10 08:53:38 +01001712 if (!dev)
1713 return 0;
1714
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001715 in_dev = __in_dev_get_rtnl(dev);
1716 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1717 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1718 return 0;
1719
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001720 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1721 if (!rif)
Ido Schimmela8c97012017-02-08 11:16:35 +01001722 return 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001723 mlxsw_sp_nexthop_rif_init(nh, rif);
Ido Schimmela8c97012017-02-08 11:16:35 +01001724
1725 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1726 if (err)
1727 goto err_nexthop_neigh_init;
1728
1729 return 0;
1730
1731err_nexthop_neigh_init:
1732 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1733 return err;
1734}
1735
/* Tear down a nexthop in the reverse order of mlxsw_sp_nexthop_init():
 * neighbour, then RIF linkage, then the hash entry.
 */
static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
1743
/* Handle FIB_EVENT_NH_ADD / FIB_EVENT_NH_DEL for a single nexthop: bind or
 * unbind its RIF and neighbour, then re-program the owning group.
 */
static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	/* After abort, hardware no longer tracks the kernel's FIB. */
	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	/* Nexthops on netdevs without a RIF are never offloaded. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
1776
/* Called when @rif goes away: detach every nexthop using it and re-program
 * the affected groups (they fall back to trapping traffic to the CPU).
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	/* _safe iteration: rif_fini() unlinks nh from rif->nexthop_list. */
	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1788
/* Allocate and initialize a nexthop group mirroring @fi, insert it into the
 * group hash and program it to the device. Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	/* Nexthops live in a flexible array at the end of the group. */
	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	/* A gateway route has a link-scope first nexthop. */
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	/* Tear down the nexthops initialized so far (all of them when the
	 * hash insertion failed, since i == count then).
	 */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
1830
/* Reverse of mlxsw_sp_nexthop_group_create(): remove the group from the
 * hash, tear down its nexthops and release its adjacency entries.
 */
static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	/* With no usable nexthops left, the refresh releases the group's
	 * adjacency index.
	 */
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
1847
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001848static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1849 struct mlxsw_sp_fib_entry *fib_entry,
1850 struct fib_info *fi)
1851{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001852 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001853 struct mlxsw_sp_nexthop_group *nh_grp;
1854
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001855 key.fi = fi;
1856 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001857 if (!nh_grp) {
1858 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1859 if (IS_ERR(nh_grp))
1860 return PTR_ERR(nh_grp);
1861 }
1862 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1863 fib_entry->nh_group = nh_grp;
1864 return 0;
1865}
1866
1867static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1868 struct mlxsw_sp_fib_entry *fib_entry)
1869{
1870 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1871
1872 list_del(&fib_entry->nexthop_group_node);
1873 if (!list_empty(&nh_grp->fib_list))
1874 return;
1875 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1876}
1877
Ido Schimmel013b20f2017-02-08 11:16:36 +01001878static bool
1879mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1880{
1881 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1882
Ido Schimmel9aecce12017-02-09 10:28:42 +01001883 if (fib_entry->params.tos)
1884 return false;
1885
Ido Schimmel013b20f2017-02-08 11:16:36 +01001886 switch (fib_entry->type) {
1887 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1888 return !!nh_group->adj_index_valid;
1889 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001890 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001891 default:
1892 return false;
1893 }
1894}
1895
1896static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1897{
1898 fib_entry->offloaded = true;
1899
Ido Schimmel76610eb2017-03-10 08:53:41 +01001900 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01001901 case MLXSW_SP_L3_PROTO_IPV4:
1902 fib_info_offload_inc(fib_entry->nh_group->key.fi);
1903 break;
1904 case MLXSW_SP_L3_PROTO_IPV6:
1905 WARN_ON_ONCE(1);
1906 }
1907}
1908
1909static void
1910mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1911{
Ido Schimmel76610eb2017-03-10 08:53:41 +01001912 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01001913 case MLXSW_SP_L3_PROTO_IPV4:
1914 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1915 break;
1916 case MLXSW_SP_L3_PROTO_IPV6:
1917 WARN_ON_ONCE(1);
1918 }
1919
1920 fib_entry->offloaded = false;
1921}
1922
1923static void
1924mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
1925 enum mlxsw_reg_ralue_op op, int err)
1926{
1927 switch (op) {
1928 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
1929 if (!fib_entry->offloaded)
1930 return;
1931 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
1932 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
1933 if (err)
1934 return;
1935 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1936 !fib_entry->offloaded)
1937 mlxsw_sp_fib_entry_offload_set(fib_entry);
1938 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1939 fib_entry->offloaded)
1940 mlxsw_sp_fib_entry_offload_unset(fib_entry);
1941 return;
1942 default:
1943 return;
1944 }
1945}
1946
/* Program a remote (gateway) route: point the LPM entry at the group's
 * adjacency range when resolved, otherwise trap matching packets to the
 * CPU so the kernel forwards them.
 */
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1980
/* Program a local route: forward matching packets to the RIF of the
 * directly connected network, or trap them to the CPU when the entry
 * should not be offloaded.
 */
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
2009
2010static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
2011 struct mlxsw_sp_fib_entry *fib_entry,
2012 enum mlxsw_reg_ralue_op op)
2013{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002014 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002015 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel9aecce12017-02-09 10:28:42 +01002016 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002017
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002018 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002019 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2020 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002021 *p_dip);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002022 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2023 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2024}
2025
2026static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
2027 struct mlxsw_sp_fib_entry *fib_entry,
2028 enum mlxsw_reg_ralue_op op)
2029{
2030 switch (fib_entry->type) {
2031 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002032 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002033 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2034 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
2035 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
2036 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
2037 }
2038 return -EINVAL;
2039}
2040
/* Perform an LPM operation for the entry and refresh its offload
 * indication accordingly.
 */
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = -EINVAL;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 is not offloaded: return without touching the
		 * offload indication.
		 */
		return err;
	}
	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
	return err;
}
2057
/* Write (create or update) the entry in the device's LPM table. */
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
2064
/* Remove the entry from the device's LPM table. */
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
2071
/* Map a kernel route type to the action the device should take.
 * Returns -EINVAL for route types that cannot be reflected to hardware.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		/* Host-directed traffic must reach the CPU. */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		/* Gateway routes are forwarded via an adjacency (remote);
		 * directly connected routes are local.
		 */
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
2103
/* Allocate a FIB entry for the notified route, derive its hardware action
 * and attach it to a (possibly shared) nexthop group. Returns ERR_PTR() on
 * failure. The entry is not yet linked into @fib_node's entry list.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_alloc;
	}

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop_group_get;

	/* Keep the route parameters used later to match notifications. */
	fib_entry->params.prio = fen_info->fi->fib_priority;
	fib_entry->params.tb_id = fen_info->tb_id;
	fib_entry->params.type = fen_info->type;
	fib_entry->params.tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib_entry;

err_nexthop_group_get:
err_fib4_entry_type_set:
	kfree(fib_entry);
err_fib_entry_alloc:
	return ERR_PTR(err);
}
2141
/* Free a FIB entry after releasing its nexthop group reference. */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}
2148
2149static struct mlxsw_sp_fib_node *
2150mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2151 const struct fib_entry_notifier_info *fen_info);
2152
2153static struct mlxsw_sp_fib_entry *
2154mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2155 const struct fib_entry_notifier_info *fen_info)
2156{
2157 struct mlxsw_sp_fib_entry *fib_entry;
2158 struct mlxsw_sp_fib_node *fib_node;
2159
2160 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2161 if (IS_ERR(fib_node))
2162 return NULL;
2163
2164 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2165 if (fib_entry->params.tb_id == fen_info->tb_id &&
2166 fib_entry->params.tos == fen_info->tos &&
2167 fib_entry->params.type == fen_info->type &&
2168 fib_entry->nh_group->key.fi == fen_info->fi) {
2169 return fib_entry;
2170 }
2171 }
2172
2173 return NULL;
2174}
2175
/* rhashtable layout for FIB nodes: keyed by (addr, prefix_len). */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
2182
/* Insert @fib_node into @fib's hash, keyed by (addr, prefix_len). */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
2189
/* Remove @fib_node from @fib's hash; reverse of mlxsw_sp_fib_node_insert(). */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
2196
/* Look up a FIB node by prefix. The key is hashed byte-wise, so it must be
 * fully zeroed (including padding) before the address is copied in.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
2208
2209static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01002210mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002211 size_t addr_len, unsigned char prefix_len)
2212{
2213 struct mlxsw_sp_fib_node *fib_node;
2214
2215 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2216 if (!fib_node)
2217 return NULL;
2218
2219 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002220 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002221 memcpy(fib_node->key.addr, addr, addr_len);
2222 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002223
2224 return fib_node;
2225}
2226
2227static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2228{
Ido Schimmel9aecce12017-02-09 10:28:42 +01002229 list_del(&fib_node->list);
2230 WARN_ON(!list_empty(&fib_node->entry_list));
2231 kfree(fib_node);
2232}
2233
2234static bool
2235mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2236 const struct mlxsw_sp_fib_entry *fib_entry)
2237{
2238 return list_first_entry(&fib_node->entry_list,
2239 struct mlxsw_sp_fib_entry, list) == fib_entry;
2240}
2241
2242static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2243{
2244 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002245 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002246
2247 if (fib->prefix_ref_count[prefix_len]++ == 0)
2248 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2249}
2250
2251static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2252{
2253 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002254 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002255
2256 if (--fib->prefix_ref_count[prefix_len] == 0)
2257 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2258}
2259
Ido Schimmel76610eb2017-03-10 08:53:41 +01002260static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2261 struct mlxsw_sp_fib_node *fib_node,
2262 struct mlxsw_sp_fib *fib)
2263{
2264 struct mlxsw_sp_prefix_usage req_prefix_usage;
2265 struct mlxsw_sp_lpm_tree *lpm_tree;
2266 int err;
2267
2268 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2269 if (err)
2270 return err;
2271 fib_node->fib = fib;
2272
2273 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2274 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2275
2276 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2277 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2278 &req_prefix_usage);
2279 if (err)
2280 goto err_tree_check;
2281 } else {
2282 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2283 fib->proto);
2284 if (IS_ERR(lpm_tree))
2285 return PTR_ERR(lpm_tree);
2286 fib->lpm_tree = lpm_tree;
2287 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2288 if (err)
2289 goto err_tree_bind;
2290 }
2291
2292 mlxsw_sp_fib_node_prefix_inc(fib_node);
2293
2294 return 0;
2295
2296err_tree_bind:
2297 fib->lpm_tree = NULL;
2298 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2299err_tree_check:
2300 fib_node->fib = NULL;
2301 mlxsw_sp_fib_node_remove(fib, fib_node);
2302 return err;
2303}
2304
/* Reverse of mlxsw_sp_fib_node_init(): drop the node's prefix reference,
 * unbind and put the LPM tree when the table becomes empty, and remove the
 * node from the table's hash.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		/* Last prefix gone - release the tree. */
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		/* Possibly migrate to a tree matching the reduced usage. */
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
2324
/* Find the FIB node for the notified route's prefix, creating it (and
 * taking a virtual router reference) on first use. Returns ERR_PTR() on
 * failure.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	/* Drop the reference taken by mlxsw_sp_vr_get() above. */
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2365
Ido Schimmel9aecce12017-02-09 10:28:42 +01002366static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
2367 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02002368{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002369 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02002370
Ido Schimmel9aecce12017-02-09 10:28:42 +01002371 if (!list_empty(&fib_node->entry_list))
2372 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002373 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002374 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002375 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02002376}
2377
/* Find the entry in the node's list before which a new entry with
 * @params should be inserted. NOTE(review): the comparisons imply the
 * list is kept sorted by tb_id, then TOS, then priority, all in
 * descending order — confirm against the insert paths. Returns NULL
 * when the new entry belongs at the end of the list (or of its tb_id
 * group).
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		/* Skip entries from higher-numbered tables. */
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		/* Past the matching table's group; insert at group end. */
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		/* Within the table: skip entries with a higher TOS. */
		if (fib_entry->params.tos > params->tos)
			continue;
		/* First entry with equal TOS but lower/equal priority, or
		 * lower TOS — the new entry goes right before it.
		 */
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}
2398
/* Append @new_entry after the run of entries that share @fib_entry's
 * tb_id, TOS and priority. @fib_entry is the first entry of that run, as
 * returned by mlxsw_sp_fib4_node_entry_find(); a NULL @fib_entry means
 * there is nothing to append to and is rejected.
 */
static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	/* Advance the cursor past all entries identical in (tb_id, tos,
	 * prio); it stops at the first differing entry (or wraps to the
	 * list head if none differ).
	 */
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	/* Insert just before the cursor, i.e. after the matching run. */
	list_add_tail(&new_entry->list, &fib_entry->list);
	return 0;
}
2418
/* Insert @new_entry into the node's entry list at its sorted position.
 * With @append, the entry is placed after its identical siblings; with
 * @replace, it is placed directly before the entry it will replace (the
 * caller removes the old one afterwards).
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	/* A replace must have something to replace. */
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		/* No insertion point found: place the entry at the end of
		 * its tb_id group (entries are ordered by descending
		 * tb_id), or at the list head if its tb_id is largest.
		 */
		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}
2455
/* Unlink @fib_entry from its node's entry list. */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
2461
2462static int
2463mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
2464 const struct mlxsw_sp_fib_node *fib_node,
2465 struct mlxsw_sp_fib_entry *fib_entry)
2466{
2467 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2468 return 0;
2469
2470 /* To prevent packet loss, overwrite the previously offloaded
2471 * entry.
2472 */
2473 if (!list_is_singular(&fib_node->entry_list)) {
2474 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2475 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2476
2477 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
2478 }
2479
2480 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2481}
2482
2483static void
2484mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
2485 const struct mlxsw_sp_fib_node *fib_node,
2486 struct mlxsw_sp_fib_entry *fib_entry)
2487{
2488 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2489 return;
2490
2491 /* Promote the next entry by overwriting the deleted entry */
2492 if (!list_is_singular(&fib_node->entry_list)) {
2493 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2494 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2495
2496 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
2497 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2498 return;
2499 }
2500
2501 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2502}
2503
2504static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002505 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002506 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002507{
2508 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2509 int err;
2510
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002511 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2512 append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002513 if (err)
2514 return err;
2515
2516 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2517 if (err)
2518 goto err_fib4_node_entry_add;
2519
Ido Schimmel9aecce12017-02-09 10:28:42 +01002520 return 0;
2521
2522err_fib4_node_entry_add:
2523 mlxsw_sp_fib4_node_list_remove(fib_entry);
2524 return err;
2525}
2526
2527static void
2528mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2529 struct mlxsw_sp_fib_entry *fib_entry)
2530{
2531 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2532
Ido Schimmel9aecce12017-02-09 10:28:42 +01002533 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
2534 mlxsw_sp_fib4_node_list_remove(fib_entry);
2535}
2536
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002537static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2538 struct mlxsw_sp_fib_entry *fib_entry,
2539 bool replace)
2540{
2541 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2542 struct mlxsw_sp_fib_entry *replaced;
2543
2544 if (!replace)
2545 return;
2546
2547 /* We inserted the new entry before replaced one */
2548 replaced = list_next_entry(fib_entry, list);
2549
2550 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2551 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2552 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2553}
2554
Ido Schimmel9aecce12017-02-09 10:28:42 +01002555static int
2556mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002557 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002558 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002559{
2560 struct mlxsw_sp_fib_entry *fib_entry;
2561 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002562 int err;
2563
Ido Schimmel9011b672017-05-16 19:38:25 +02002564 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002565 return 0;
2566
Ido Schimmel9aecce12017-02-09 10:28:42 +01002567 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2568 if (IS_ERR(fib_node)) {
2569 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
2570 return PTR_ERR(fib_node);
2571 }
2572
2573 fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002574 if (IS_ERR(fib_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002575 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
2576 err = PTR_ERR(fib_entry);
2577 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002578 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02002579
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002580 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
2581 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002582 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002583 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
2584 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002585 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01002586
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002587 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
2588
Jiri Pirko61c503f2016-07-04 08:23:11 +02002589 return 0;
2590
Ido Schimmel9aecce12017-02-09 10:28:42 +01002591err_fib4_node_entry_link:
2592 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2593err_fib4_entry_create:
2594 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002595 return err;
2596}
2597
Jiri Pirko37956d72016-10-20 16:05:43 +02002598static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2599 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002600{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002601 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002602 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002603
Ido Schimmel9011b672017-05-16 19:38:25 +02002604 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02002605 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002606
Ido Schimmel9aecce12017-02-09 10:28:42 +01002607 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2608 if (WARN_ON(!fib_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02002609 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002610 fib_node = fib_entry->fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02002611
Ido Schimmel9aecce12017-02-09 10:28:42 +01002612 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2613 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2614 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002615}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002616
/* Abort mode: bind every in-use virtual router to a minimal LPM tree and
 * install a default catch-all route with an IP2ME action, so all routed
 * traffic is sent to the CPU for the kernel to handle.
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	/* Allocate the minimal LPM tree... */
	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	/* ...and set its structure before binding any VR to it. */
	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		/* Bind the VR to the minimal tree. */
		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* Default route (0.0.0.0/0) trapping packets to the CPU. */
		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
2662
/* Destroy every entry of @fib_node. The final put may free the node
 * itself, hence the do_break dance below.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		/* Evaluate before the node can be freed by the put below. */
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}
2682
/* Protocol dispatch for node flush. IPv6 is not offloaded yet, so
 * reaching it here is a bug.
 */
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
2695
/* Flush all FIB nodes of @proto in the given virtual router. Same
 * do_break pattern as the per-node flush: the last node flush may free
 * structures the next iteration would touch.
 */
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		/* Checked up front: flushing the last node may free @fib. */
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
2711
Ido Schimmelac571de2016-11-14 11:26:32 +01002712static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002713{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002714 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002715
Jiri Pirkoc1a38312016-10-21 16:07:23 +02002716 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02002717 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01002718
Ido Schimmel76610eb2017-03-10 08:53:41 +01002719 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002720 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002721 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002722 }
Ido Schimmelac571de2016-11-14 11:26:32 +01002723}
2724
2725static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
2726{
2727 int err;
2728
Ido Schimmel9011b672017-05-16 19:38:25 +02002729 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01002730 return;
2731 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01002732 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02002733 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002734 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
2735 if (err)
2736 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2737}
2738
/* Deferred FIB notifier event. The notifier runs in atomic context, so
 * the payload is copied here and processed from a work item.
 */
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	/* Event payload; which member is valid depends on @event. */
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	/* FIB_EVENT_* value that was notified. */
	unsigned long event;
};
2749
/* Work item processing a deferred FIB notifier event. Releases the
 * reference taken on the payload by the notifier and frees the work.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		/* On any failure, give up on offloading altogether. */
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		/* Policy routing rules (other than the defaults / l3mdev)
		 * cannot be offloaded; abort instead of routing wrongly.
		 */
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
2794
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	/* Only routes from the init namespace are offloaded. */
	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	/* Atomic context: copy the event and defer processing to a work
	 * item (mlxsw_sp_router_fib_event_work).
	 */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	fib_work->mlxsw_sp = mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		/* Hold the rule until the work item has inspected it. */
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		/* Pin the parent fib_info for the nexthop payload. */
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
2841
Ido Schimmel4724ba562017-03-10 08:53:39 +01002842static struct mlxsw_sp_rif *
2843mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2844 const struct net_device *dev)
2845{
2846 int i;
2847
2848 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2849 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
2850 return mlxsw_sp->rifs[i];
2851
2852 return NULL;
2853}
2854
/* Disable RIF @rif in the device via a RITR read-modify-write: query the
 * current interface record, clear its enable bit and write it back.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	/* A query failure here is unexpected; warn once and bail out. */
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2868
/* Synchronize router state with the disappearance of @rif: disable it in
 * hardware first, then drop the nexthops and neighbours that used it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
2876
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002877static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002878 const struct in_device *in_dev,
2879 unsigned long event)
2880{
2881 switch (event) {
2882 case NETDEV_UP:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002883 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002884 return true;
2885 return false;
2886 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002887 if (rif && !in_dev->ifa_list &&
2888 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01002889 return true;
2890 /* It is possible we already removed the RIF ourselves
2891 * if it was assigned to a netdev that is now a bridge
2892 * or LAG slave.
2893 */
2894 return false;
2895 }
2896
2897 return false;
2898}
2899
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002900#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
Ido Schimmel4724ba562017-03-10 08:53:39 +01002901static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2902{
2903 int i;
2904
2905 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2906 if (!mlxsw_sp->rifs[i])
2907 return i;
2908
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002909 return MLXSW_SP_INVALID_INDEX_RIF;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002910}
2911
2912static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2913 bool *p_lagged, u16 *p_system_port)
2914{
2915 u8 local_port = mlxsw_sp_vport->local_port;
2916
2917 *p_lagged = mlxsw_sp_vport->lagged;
2918 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2919}
2920
2921static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel69132292017-03-10 08:53:42 +01002922 u16 vr_id, struct net_device *l3_dev,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002923 u16 rif_index, bool create)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002924{
2925 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2926 bool lagged = mlxsw_sp_vport->lagged;
2927 char ritr_pl[MLXSW_REG_RITR_LEN];
2928 u16 system_port;
2929
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002930 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
2931 vr_id, l3_dev->mtu, l3_dev->dev_addr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002932
2933 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2934 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2935 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2936
2937 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2938}
2939
2940static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2941
/* Map a Sub-port RIF index to its router FID (rFID) number. */
static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
{
	return MLXSW_SP_RFID_BASE + rif_index;
}
2946
2947static struct mlxsw_sp_fid *
2948mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2949{
2950 struct mlxsw_sp_fid *f;
2951
2952 f = kzalloc(sizeof(*f), GFP_KERNEL);
2953 if (!f)
2954 return NULL;
2955
2956 f->leave = mlxsw_sp_vport_rif_sp_leave;
2957 f->ref_count = 0;
2958 f->dev = l3_dev;
2959 f->fid = fid;
2960
2961 return f;
2962}
2963
2964static struct mlxsw_sp_rif *
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002965mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
Ido Schimmel69132292017-03-10 08:53:42 +01002966 struct mlxsw_sp_fid *f)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002967{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002968 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002969
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002970 rif = kzalloc(sizeof(*rif), GFP_KERNEL);
2971 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002972 return NULL;
2973
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002974 INIT_LIST_HEAD(&rif->nexthop_list);
2975 INIT_LIST_HEAD(&rif->neigh_list);
2976 ether_addr_copy(rif->addr, l3_dev->dev_addr);
2977 rif->mtu = l3_dev->mtu;
2978 rif->vr_id = vr_id;
2979 rif->dev = l3_dev;
2980 rif->rif_index = rif_index;
2981 rif->f = f;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002982
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002983 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002984}
2985
/* Accessor: hardware index of @rif. */
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}
2990
/* Accessor: ifindex of the netdev backing @rif. */
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
2995
Ido Schimmel4724ba562017-03-10 08:53:39 +01002996static struct mlxsw_sp_rif *
2997mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
2998 struct net_device *l3_dev)
2999{
3000 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Ido Schimmel57837882017-03-16 09:08:16 +01003001 u32 tb_id = l3mdev_fib_table(l3_dev);
Ido Schimmel69132292017-03-10 08:53:42 +01003002 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003003 struct mlxsw_sp_fid *f;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003004 struct mlxsw_sp_rif *rif;
3005 u16 fid, rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003006 int err;
3007
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003008 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
3009 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003010 return ERR_PTR(-ERANGE);
3011
Ido Schimmel57837882017-03-16 09:08:16 +01003012 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
Ido Schimmel69132292017-03-10 08:53:42 +01003013 if (IS_ERR(vr))
3014 return ERR_CAST(vr);
3015
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003016 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
3017 rif_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003018 if (err)
Ido Schimmel69132292017-03-10 08:53:42 +01003019 goto err_vport_rif_sp_op;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003020
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003021 fid = mlxsw_sp_rif_sp_to_fid(rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003022 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
3023 if (err)
3024 goto err_rif_fdb_op;
3025
3026 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
3027 if (!f) {
3028 err = -ENOMEM;
3029 goto err_rfid_alloc;
3030 }
3031
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003032 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
3033 if (!rif) {
Ido Schimmel4724ba562017-03-10 08:53:39 +01003034 err = -ENOMEM;
3035 goto err_rif_alloc;
3036 }
3037
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02003038 if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
3039 MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
3040 err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
3041 MLXSW_SP_RIF_COUNTER_EGRESS);
3042 if (err)
3043 netdev_dbg(mlxsw_sp_vport->dev,
3044 "Counter alloc Failed err=%d\n", err);
3045 }
3046
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003047 f->rif = rif;
3048 mlxsw_sp->rifs[rif_index] = rif;
Ido Schimmel69132292017-03-10 08:53:42 +01003049 vr->rif_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003050
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003051 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003052
3053err_rif_alloc:
3054 kfree(f);
3055err_rfid_alloc:
3056 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3057err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003058 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
3059 false);
Ido Schimmel69132292017-03-10 08:53:42 +01003060err_vport_rif_sp_op:
3061 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003062 return ERR_PTR(err);
3063}
3064
/* Tear down a Sub-port RIF: mirror of mlxsw_sp_vport_rif_sp_create(),
 * releasing resources in reverse order of acquisition.
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
	/* Snapshot fields needed after the RIF object is freed. */
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;
	u16 fid = f->fid;

	/* Disable in hardware and flush dependent nexthops/neighbours. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);

	vr->rif_count--;
	mlxsw_sp->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
	mlxsw_sp_vr_put(vr);
}
3094
3095static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3096 struct net_device *l3_dev)
3097{
3098 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003099 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003100
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003101 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3102 if (!rif) {
3103 rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
3104 if (IS_ERR(rif))
3105 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003106 }
3107
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003108 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
3109 rif->f->ref_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003110
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003111 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003112
3113 return 0;
3114}
3115
3116static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3117{
3118 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3119
3120 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
3121
3122 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
3123 if (--f->ref_count == 0)
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003124 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003125}
3126
3127static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
3128 struct net_device *port_dev,
3129 unsigned long event, u16 vid)
3130{
3131 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
3132 struct mlxsw_sp_port *mlxsw_sp_vport;
3133
3134 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3135 if (WARN_ON(!mlxsw_sp_vport))
3136 return -EINVAL;
3137
3138 switch (event) {
3139 case NETDEV_UP:
3140 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
3141 case NETDEV_DOWN:
3142 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3143 break;
3144 }
3145
3146 return 0;
3147}
3148
3149static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3150 unsigned long event)
3151{
Jiri Pirko2b94e582017-04-18 16:55:37 +02003152 if (netif_is_bridge_port(port_dev) ||
3153 netif_is_lag_port(port_dev) ||
3154 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003155 return 0;
3156
3157 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3158}
3159
3160static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3161 struct net_device *lag_dev,
3162 unsigned long event, u16 vid)
3163{
3164 struct net_device *port_dev;
3165 struct list_head *iter;
3166 int err;
3167
3168 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3169 if (mlxsw_sp_port_dev_check(port_dev)) {
3170 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
3171 event, vid);
3172 if (err)
3173 return err;
3174 }
3175 }
3176
3177 return 0;
3178}
3179
/* Inetaddr event on a LAG device; bridged LAGs are handled via the
 * bridge instead.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	/* Untagged LAG traffic maps to VLAN 1. */
	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
3188
3189static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
3190 struct net_device *l3_dev)
3191{
3192 u16 fid;
3193
3194 if (is_vlan_dev(l3_dev))
3195 fid = vlan_dev_vlan_id(l3_dev);
3196 else if (mlxsw_sp->master_bridge.dev == l3_dev)
3197 fid = 1;
3198 else
3199 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
3200
3201 return mlxsw_sp_fid_find(mlxsw_sp, fid);
3202}
3203
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003204static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
3205{
3206 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
3207}
3208
Ido Schimmel4724ba562017-03-10 08:53:39 +01003209static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
3210{
3211 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
3212 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3213}
3214
3215static u16 mlxsw_sp_flood_table_index_get(u16 fid)
3216{
3217 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
3218}
3219
3220static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
3221 bool set)
3222{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003223 u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003224 enum mlxsw_flood_table_type table_type;
3225 char *sftr_pl;
3226 u16 index;
3227 int err;
3228
3229 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
3230 if (!sftr_pl)
3231 return -ENOMEM;
3232
3233 table_type = mlxsw_sp_flood_table_type_get(fid);
3234 index = mlxsw_sp_flood_table_index_get(fid);
3235 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003236 1, router_port, set);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003237 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
3238
3239 kfree(sftr_pl);
3240 return err;
3241}
3242
3243static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3244{
3245 if (mlxsw_sp_fid_is_vfid(fid))
3246 return MLXSW_REG_RITR_FID_IF;
3247 else
3248 return MLXSW_REG_RITR_VLAN_IF;
3249}
3250
Ido Schimmel69132292017-03-10 08:53:42 +01003251static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003252 struct net_device *l3_dev,
3253 u16 fid, u16 rif,
3254 bool create)
3255{
3256 enum mlxsw_reg_ritr_if_type rif_type;
3257 char ritr_pl[MLXSW_REG_RITR_LEN];
3258
3259 rif_type = mlxsw_sp_rif_type_get(fid);
Ido Schimmel69132292017-03-10 08:53:42 +01003260 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003261 l3_dev->dev_addr);
3262 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3263
3264 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3265}
3266
/* Create a RIF for a bridge (or bridge-VLAN) upper netdev and program it
 * into the device, binding it to the virtual router derived from the
 * netdev's L3 master (main table when there is none).
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	/* Reserve a free RIF index before touching the hardware. */
	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return -ERANGE;

	/* Not enslaved to a VRF -> use the main routing table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	/* Let broadcast flooding in this FID reach the router port. */
	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		goto err_port_flood_set;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
				     rif_index, true);
	if (err)
		goto err_rif_bridge_op;

	/* Install an FDB entry for the RIF's MAC in this FID. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	/* Publish the new RIF only after all fallible steps succeeded. */
	f->rif = rif;
	mlxsw_sp->rifs[rif_index] = rif;
	vr->rif_count++;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);

	return 0;

	/* Error unwind: undo the steps above in reverse order. */
err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
	mlxsw_sp_vr_put(vr);
	return err;
}
3323
/* Destroy a bridge RIF, releasing resources in reverse order of
 * mlxsw_sp_rif_bridge_create().
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	/* Cache the index now - 'rif' is freed below but the value is
	 * still needed for the hardware teardown calls.
	 */
	u16 rif_index = rif->rif_index;

	/* Notify the rest of the router code that this RIF is going away. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	vr->rif_count--;
	mlxsw_sp->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	/* Remove the FDB entry for the RIF's MAC. */
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	/* Destroy the RIF in hardware. */
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);

	/* Stop flooding broadcast traffic towards the router port. */
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	mlxsw_sp_vr_put(vr);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
}
3351
3352static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3353 struct net_device *br_dev,
3354 unsigned long event)
3355{
3356 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3357 struct mlxsw_sp_fid *f;
3358
3359 /* FID can either be an actual FID if the L3 device is the
3360 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3361 * L3 device is a VLAN-unaware bridge and we get a vFID.
3362 */
3363 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3364 if (WARN_ON(!f))
3365 return -EINVAL;
3366
3367 switch (event) {
3368 case NETDEV_UP:
3369 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3370 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003371 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003372 break;
3373 }
3374
3375 return 0;
3376}
3377
3378static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3379 unsigned long event)
3380{
3381 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3382 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3383 u16 vid = vlan_dev_vlan_id(vlan_dev);
3384
3385 if (mlxsw_sp_port_dev_check(real_dev))
3386 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3387 vid);
3388 else if (netif_is_lag_master(real_dev))
3389 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3390 vid);
3391 else if (netif_is_bridge_master(real_dev) &&
3392 mlxsw_sp->master_bridge.dev == real_dev)
3393 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3394 event);
3395
3396 return 0;
3397}
3398
/* Dispatch an inetaddr event to the handler matching the netdev kind. */
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event);
	if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event);
	if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event);

	return 0;
}
3413
Ido Schimmel4724ba562017-03-10 08:53:39 +01003414int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3415 unsigned long event, void *ptr)
3416{
3417 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3418 struct net_device *dev = ifa->ifa_dev->dev;
3419 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003420 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003421 int err = 0;
3422
3423 mlxsw_sp = mlxsw_sp_lower_get(dev);
3424 if (!mlxsw_sp)
3425 goto out;
3426
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003427 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3428 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003429 goto out;
3430
Ido Schimmelb1e45522017-04-30 19:47:14 +03003431 err = __mlxsw_sp_inetaddr_event(dev, event);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003432out:
3433 return notifier_from_errno(err);
3434}
3435
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003436static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003437 const char *mac, int mtu)
3438{
3439 char ritr_pl[MLXSW_REG_RITR_LEN];
3440 int err;
3441
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003442 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003443 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3444 if (err)
3445 return err;
3446
3447 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3448 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3449 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3450 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3451}
3452
/* Handle a MAC/MTU change on a netdev backed by a RIF: refresh both the
 * FDB entry for the RIF's MAC and the RIF itself, rolling back to the
 * previous values on failure.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	/* Nothing to do for netdevs without a RIF. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	/* Remove the FDB entry for the old MAC before editing the RIF. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	/* Install the FDB entry for the new MAC. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Commit the new values to the software RIF. */
	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

	/* Roll back to the previous MAC/MTU on failure. */
err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
	return err;
}
3493
Ido Schimmelb1e45522017-04-30 19:47:14 +03003494static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3495 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003496{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003497 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003498
Ido Schimmelb1e45522017-04-30 19:47:14 +03003499 /* If netdev is already associated with a RIF, then we need to
3500 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01003501 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03003502 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3503 if (rif)
3504 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003505
Ido Schimmelb1e45522017-04-30 19:47:14 +03003506 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003507}
3508
Ido Schimmelb1e45522017-04-30 19:47:14 +03003509static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3510 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003511{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003512 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003513
Ido Schimmelb1e45522017-04-30 19:47:14 +03003514 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3515 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003516 return;
Ido Schimmelb1e45522017-04-30 19:47:14 +03003517 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003518}
3519
Ido Schimmelb1e45522017-04-30 19:47:14 +03003520int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3521 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003522{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003523 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3524 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003525
Ido Schimmelb1e45522017-04-30 19:47:14 +03003526 if (!mlxsw_sp)
3527 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003528
Ido Schimmelb1e45522017-04-30 19:47:14 +03003529 switch (event) {
3530 case NETDEV_PRECHANGEUPPER:
3531 return 0;
3532 case NETDEV_CHANGEUPPER:
3533 if (info->linking)
3534 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3535 else
3536 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3537 break;
3538 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003539
Ido Schimmelb1e45522017-04-30 19:47:14 +03003540 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003541}
3542
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003543static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3544{
3545 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
3546
3547 /* Flush pending FIB notifications and then flush the device's
3548 * table before requesting another dump. The FIB notification
3549 * block is unregistered, so no need to take RTNL.
3550 */
3551 mlxsw_core_flush_owq();
3552 mlxsw_sp_router_fib_flush(mlxsw_sp);
3553}
3554
/* Allocate the RIF table and enable routing via the RGCR register.
 * Returns 0 on success or a negative errno.
 */
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	/* The RIF capacity must be exposed by the device's resources. */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	/* Enable routing and cap the number of router interfaces. */
	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}
3582
3583static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3584{
3585 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3586 int i;
3587
3588 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3589 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3590
3591 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3592 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
3593
3594 kfree(mlxsw_sp->rifs);
3595}
3596
/* Initialize the router subsystem: allocate the router context, program
 * global router configuration, set up the nexthop hash tables, LPM trees,
 * virtual routers and neighbour handling, and finally register the FIB
 * notifier. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	/* Register the FIB notifier last, once everything it may invoke
	 * is set up.
	 */
	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

	/* Error unwind: undo initialization in reverse order. */
err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}
3659
/* Tear down the router subsystem in the exact reverse order of
 * mlxsw_sp_router_init().
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}