/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;

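/* Per-ASIC router state: the RIF array, the virtual routers, the
 * neighbour, nexthop and nexthop group hash tables, the LPM tree pool
 * and the delayed works used for neighbour activity dumping and
 * unresolved nexthop probing.
 */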
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

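/* Enable or disable a counter on the RIF in the given direction. The
 * current RITR configuration is queried first so that only the counter
 * fields are changed on write-back.
 */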
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

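/* Allocate a counter from the RIF sub-pool, clear it and bind it to the
 * RIF. On failure the counter is returned to the pool.
 */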
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

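/* Prefix usage is tracked as a bitmap with one bit per possible prefix
 * length, sized for an IPv6 address (up to /128).
 */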
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_params {
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

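/* Program the tree structure (RALST): the longest used prefix length
 * becomes the root bin and the used prefix lengths are chained so that
 * each one's left child is the next shorter used length.
 */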
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

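/* Return an existing LPM tree that already matches the protocol and
 * prefix usage, or create one in an unused slot. A reference is taken
 * either way and must be dropped with mlxsw_sp_lpm_tree_put().
 */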
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->tb_id = tb_id;
	return vr;
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

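/* Make sure the FIB is bound to an LPM tree that can hold the requested
 * prefix usage. If the current tree does not match, bind the virtual
 * router to a matching tree first and only then release the old one, so
 * that there is always a valid binding.
 */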
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

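/* Returns true when the activity dump may have been truncated: the
 * response holds the maximum number of records and its last record
 * cannot hold more entries, so the caller should query again.
 */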
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	char *rauhtd_pl;
	u8 num_rec;
	int i, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over the nexthop neighbours and send ARP for those that
	 * are unresolved. This solves the chicken-and-egg problem where a
	 * nexthop is not offloaded until its neighbour is resolved, but
	 * the neighbour is never resolved as long as traffic is forwarded
	 * in HW via a different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl == &arp_tbl)
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	else
		WARN_ON_ONCE(1);
}

struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

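/* Process-context handler for NETEVENT_NEIGH_UPDATE events: snapshot the
 * neighbour state, create or look up the matching entry, program it in
 * the device, update the nexthops using it and destroy the entry once it
 * is neither connected nor referenced by any nexthop.
 */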
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}

int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_rif *rif)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
			     rif->rif_index, rif->addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}

struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};

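/* A nexthop group owns a contiguous block of adjacency entries;
 * adj_index is the base of the block and ecmp_size its size. fib_list
 * tracks the FIB entries that are routed through this group.
 */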
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001300struct mlxsw_sp_nexthop_group {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001301 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001302 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001303 struct mlxsw_sp_nexthop_group_key key;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001304 u8 adj_index_valid:1,
1305 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001306 u32 adj_index;
1307 u16 ecmp_size;
1308 u16 count;
1309 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001310#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001311};
1312
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001313static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1314 .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1315 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1316 .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1317};
1318
1319static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1320 struct mlxsw_sp_nexthop_group *nh_grp)
1321{
Ido Schimmel9011b672017-05-16 19:38:25 +02001322 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001323 &nh_grp->ht_node,
1324 mlxsw_sp_nexthop_group_ht_params);
1325}
1326
1327static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1328 struct mlxsw_sp_nexthop_group *nh_grp)
1329{
Ido Schimmel9011b672017-05-16 19:38:25 +02001330 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001331 &nh_grp->ht_node,
1332 mlxsw_sp_nexthop_group_ht_params);
1333}
1334
1335static struct mlxsw_sp_nexthop_group *
1336mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1337 struct mlxsw_sp_nexthop_group_key key)
1338{
Ido Schimmel9011b672017-05-16 19:38:25 +02001339 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001340 mlxsw_sp_nexthop_group_ht_params);
1341}
1342
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001343static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1344 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1345 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1346 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
1347};
1348
1349static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1350 struct mlxsw_sp_nexthop *nh)
1351{
Ido Schimmel9011b672017-05-16 19:38:25 +02001352 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001353 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1354}
1355
1356static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1357 struct mlxsw_sp_nexthop *nh)
1358{
Ido Schimmel9011b672017-05-16 19:38:25 +02001359 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001360 mlxsw_sp_nexthop_ht_params);
1361}
1362
Ido Schimmelad178c82017-02-08 11:16:40 +01001363static struct mlxsw_sp_nexthop *
1364mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1365 struct mlxsw_sp_nexthop_key key)
1366{
Ido Schimmel9011b672017-05-16 19:38:25 +02001367 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01001368 mlxsw_sp_nexthop_ht_params);
1369}
1370
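/* Ask the device, via the RALEU register, to rewrite every route in the
 * given FIB that references the old adjacency index and ECMP size so that
 * it points at the new ones instead.
 */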
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001371static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001372 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001373 u32 adj_index, u16 ecmp_size,
1374 u32 new_adj_index,
1375 u16 new_ecmp_size)
1376{
1377 char raleu_pl[MLXSW_REG_RALEU_LEN];
1378
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001379 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001380 (enum mlxsw_reg_ralxx_protocol) fib->proto,
1381 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001382 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001383 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1384}
1385
1386static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1387 struct mlxsw_sp_nexthop_group *nh_grp,
1388 u32 old_adj_index, u16 old_ecmp_size)
1389{
1390 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001391 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001392 int err;
1393
1394 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01001395 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001396 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001397 fib = fib_entry->fib_node->fib;
1398 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001399 old_adj_index,
1400 old_ecmp_size,
1401 nh_grp->adj_index,
1402 nh_grp->ecmp_size);
1403 if (err)
1404 return err;
1405 }
1406 return 0;
1407}
1408
1409static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1410 struct mlxsw_sp_nexthop *nh)
1411{
1412 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1413 char ratr_pl[MLXSW_REG_RATR_LEN];
1414
1415 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1416 true, adj_index, neigh_entry->rif);
1417 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1418 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1419}
1420
1421static int
1422mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
Ido Schimmela59b7e02017-01-23 11:11:42 +01001423 struct mlxsw_sp_nexthop_group *nh_grp,
1424 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001425{
1426 u32 adj_index = nh_grp->adj_index; /* base */
1427 struct mlxsw_sp_nexthop *nh;
1428 int i;
1429 int err;
1430
1431 for (i = 0; i < nh_grp->count; i++) {
1432 nh = &nh_grp->nexthops[i];
1433
1434 if (!nh->should_offload) {
1435 nh->offloaded = 0;
1436 continue;
1437 }
1438
Ido Schimmela59b7e02017-01-23 11:11:42 +01001439 if (nh->update || reallocate) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001440 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1441 adj_index, nh);
1442 if (err)
1443 return err;
1444 nh->update = 0;
1445 nh->offloaded = 1;
1446 }
1447 adj_index++;
1448 }
1449 return 0;
1450}
1451
1452static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1453 struct mlxsw_sp_fib_entry *fib_entry);
1454
1455static int
1456mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1457 struct mlxsw_sp_nexthop_group *nh_grp)
1458{
1459 struct mlxsw_sp_fib_entry *fib_entry;
1460 int err;
1461
1462 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1463 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1464 if (err)
1465 return err;
1466 }
1467 return 0;
1468}
1469
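/* Re-evaluate which nexthops of the group can be offloaded and program the
 * adjacency table accordingly. Groups without a gateway only have their
 * FIB entries refreshed. Otherwise:
 * - If only neighbour MACs changed, rewrite the existing adjacency entries
 *   in place.
 * - If the set of offloadable nexthops changed, allocate a new block of
 *   KVD linear entries, program it, and then either update the FIB entries
 *   (first allocation) or mass-update them to the new adjacency index.
 * - On any failure, or when no nexthop is resolved, fall back to trapping
 *   the routes to the kernel.
 */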
1470static void
1471mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1472 struct mlxsw_sp_nexthop_group *nh_grp)
1473{
1474 struct mlxsw_sp_nexthop *nh;
1475 bool offload_change = false;
1476 u32 adj_index;
1477 u16 ecmp_size = 0;
1478 bool old_adj_index_valid;
1479 u32 old_adj_index;
1480 u16 old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001481 int i;
1482 int err;
1483
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001484 if (!nh_grp->gateway) {
1485 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1486 return;
1487 }
1488
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001489 for (i = 0; i < nh_grp->count; i++) {
1490 nh = &nh_grp->nexthops[i];
1491
1492 if (nh->should_offload ^ nh->offloaded) {
1493 offload_change = true;
1494 if (nh->should_offload)
1495 nh->update = 1;
1496 }
1497 if (nh->should_offload)
1498 ecmp_size++;
1499 }
1500 if (!offload_change) {
1501 /* Nothing was added or removed, so no need to reallocate. Just
1502 * update MAC on existing adjacency indexes.
1503 */
Ido Schimmela59b7e02017-01-23 11:11:42 +01001504 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1505 false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001506 if (err) {
1507 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1508 goto set_trap;
1509 }
1510 return;
1511 }
1512 if (!ecmp_size)
 1513		/* No neigh of this group is connected, so we just set
 1514		 * the trap and let everything flow through the kernel.
1515 */
1516 goto set_trap;
1517
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01001518 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
1519 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001520 /* We ran out of KVD linear space, just set the
 1521		 * trap and let everything flow through the kernel.
1522 */
1523 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1524 goto set_trap;
1525 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001526 old_adj_index_valid = nh_grp->adj_index_valid;
1527 old_adj_index = nh_grp->adj_index;
1528 old_ecmp_size = nh_grp->ecmp_size;
1529 nh_grp->adj_index_valid = 1;
1530 nh_grp->adj_index = adj_index;
1531 nh_grp->ecmp_size = ecmp_size;
Ido Schimmela59b7e02017-01-23 11:11:42 +01001532 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001533 if (err) {
1534 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1535 goto set_trap;
1536 }
1537
1538 if (!old_adj_index_valid) {
1539 /* The trap was set for fib entries, so we have to call
1540 * fib entry update to unset it and use adjacency index.
1541 */
1542 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1543 if (err) {
1544 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1545 goto set_trap;
1546 }
1547 return;
1548 }
1549
1550 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1551 old_adj_index, old_ecmp_size);
1552 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1553 if (err) {
1554 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1555 goto set_trap;
1556 }
1557 return;
1558
1559set_trap:
1560 old_adj_index_valid = nh_grp->adj_index_valid;
1561 nh_grp->adj_index_valid = 0;
1562 for (i = 0; i < nh_grp->count; i++) {
1563 nh = &nh_grp->nexthops[i];
1564 nh->offloaded = 0;
1565 }
1566 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1567 if (err)
1568 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1569 if (old_adj_index_valid)
1570 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1571}
1572
1573static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1574 bool removing)
1575{
1576 if (!removing && !nh->should_offload)
1577 nh->should_offload = 1;
1578 else if (removing && nh->offloaded)
1579 nh->should_offload = 0;
1580 nh->update = 1;
1581}
1582
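/* Propagate a neighbour state change to every nexthop using it: update the
 * nexthops' offload state and refresh the owning nexthop groups.
 */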
1583static void
1584mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1585 struct mlxsw_sp_neigh_entry *neigh_entry,
1586 bool removing)
1587{
1588 struct mlxsw_sp_nexthop *nh;
1589
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001590 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1591 neigh_list_node) {
1592 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1593 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1594 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001595}
1596
Ido Schimmel9665b742017-02-08 11:16:42 +01001597static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001598 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001599{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001600 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001601 return;
1602
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001603 nh->rif = rif;
1604 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001605}
1606
1607static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1608{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001609 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001610 return;
1611
1612 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001613 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01001614}
1615
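/* Resolve (or create) the neighbour used by a gateway nexthop, link the
 * nexthop to the corresponding neigh entry and derive its initial offload
 * state from the neighbour's NUD state. The reference taken on the kernel
 * neighbour is only released in mlxsw_sp_nexthop_neigh_fini().
 */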
Ido Schimmela8c97012017-02-08 11:16:35 +01001616static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1617 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001618{
1619 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01001620 struct fib_nh *fib_nh = nh->key.fib_nh;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001621 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001622 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001623 int err;
1624
Ido Schimmelad178c82017-02-08 11:16:40 +01001625 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01001626 return 0;
1627
Jiri Pirko33b13412016-11-10 12:31:04 +01001628	/* Take a reference on the neighbour here, ensuring that it would
 1629	 * not be destroyed before the nexthop entry is finished.
1630 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01001631 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01001632 */
Ido Schimmela8c97012017-02-08 11:16:35 +01001633 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01001634 if (!n) {
Ido Schimmela8c97012017-02-08 11:16:35 +01001635 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1636 if (IS_ERR(n))
1637 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001638 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01001639 }
1640 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1641 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001642 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1643 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001644 err = -EINVAL;
1645 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001646 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001647 }
Yotam Gigib2157142016-07-05 11:27:51 +02001648
1649 /* If that is the first nexthop connected to that neigh, add to
1650 * nexthop_neighs_list
1651 */
1652 if (list_empty(&neigh_entry->nexthop_list))
1653 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02001654 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02001655
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001656 nh->neigh_entry = neigh_entry;
1657 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1658 read_lock_bh(&n->lock);
1659 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001660 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001661 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01001662 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001663
1664 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001665
1666err_neigh_entry_create:
1667 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001668 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001669}
1670
Ido Schimmela8c97012017-02-08 11:16:35 +01001671static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
1672 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001673{
1674 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01001675 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001676
Ido Schimmelb8399a12017-02-08 11:16:33 +01001677 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01001678 return;
1679 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01001680
Ido Schimmel58312122016-12-23 09:32:50 +01001681 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001682 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01001683 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02001684
1685 /* If that is the last nexthop connected to that neigh, remove from
1686 * nexthop_neighs_list
1687 */
Ido Schimmele58be792017-02-08 11:16:28 +01001688 if (list_empty(&neigh_entry->nexthop_list))
1689 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02001690
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001691 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1692 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1693
1694 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01001695}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001696
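/* Initialize a single nexthop: insert it into the nexthop hash table and,
 * if its netdevice is backed by a router interface (RIF), bind it to that
 * RIF and resolve its neighbour. Nexthops without a device, or whose
 * gateway is ignored due to linkdown, are left unoffloaded.
 */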
Ido Schimmela8c97012017-02-08 11:16:35 +01001697static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1698 struct mlxsw_sp_nexthop_group *nh_grp,
1699 struct mlxsw_sp_nexthop *nh,
1700 struct fib_nh *fib_nh)
1701{
1702 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001703 struct in_device *in_dev;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001704 struct mlxsw_sp_rif *rif;
Ido Schimmela8c97012017-02-08 11:16:35 +01001705 int err;
1706
1707 nh->nh_grp = nh_grp;
1708 nh->key.fib_nh = fib_nh;
1709 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1710 if (err)
1711 return err;
1712
Ido Schimmel97989ee2017-03-10 08:53:38 +01001713 if (!dev)
1714 return 0;
1715
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001716 in_dev = __in_dev_get_rtnl(dev);
1717 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1718 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1719 return 0;
1720
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001721 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1722 if (!rif)
Ido Schimmela8c97012017-02-08 11:16:35 +01001723 return 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001724 mlxsw_sp_nexthop_rif_init(nh, rif);
Ido Schimmela8c97012017-02-08 11:16:35 +01001725
1726 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1727 if (err)
1728 goto err_nexthop_neigh_init;
1729
1730 return 0;
1731
1732err_nexthop_neigh_init:
1733 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1734 return err;
1735}
1736
1737static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1738 struct mlxsw_sp_nexthop *nh)
1739{
1740 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01001741 mlxsw_sp_nexthop_rif_fini(nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001742 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001743}
1744
Ido Schimmelad178c82017-02-08 11:16:40 +01001745static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1746 unsigned long event, struct fib_nh *fib_nh)
1747{
1748 struct mlxsw_sp_nexthop_key key;
1749 struct mlxsw_sp_nexthop *nh;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001750 struct mlxsw_sp_rif *rif;
Ido Schimmelad178c82017-02-08 11:16:40 +01001751
Ido Schimmel9011b672017-05-16 19:38:25 +02001752 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01001753 return;
1754
1755 key.fib_nh = fib_nh;
1756 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
1757 if (WARN_ON_ONCE(!nh))
1758 return;
1759
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001760 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1761 if (!rif)
Ido Schimmelad178c82017-02-08 11:16:40 +01001762 return;
1763
1764 switch (event) {
1765 case FIB_EVENT_NH_ADD:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001766 mlxsw_sp_nexthop_rif_init(nh, rif);
Ido Schimmelad178c82017-02-08 11:16:40 +01001767 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1768 break;
1769 case FIB_EVENT_NH_DEL:
1770 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01001771 mlxsw_sp_nexthop_rif_fini(nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01001772 break;
1773 }
1774
1775 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1776}
1777
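/* Called when a RIF is going away: detach every nexthop still bound to it
 * and refresh the affected nexthop groups so their routes no longer use
 * the removed interface.
 */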
Ido Schimmel9665b742017-02-08 11:16:42 +01001778static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001779 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001780{
1781 struct mlxsw_sp_nexthop *nh, *tmp;
1782
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001783 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Ido Schimmel9665b742017-02-08 11:16:42 +01001784 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1785 mlxsw_sp_nexthop_rif_fini(nh);
1786 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1787 }
1788}
1789
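/* Create a nexthop group for the given fib_info: allocate one
 * mlxsw_sp_nexthop per kernel nexthop, initialize them, insert the group
 * into the group hash table and program it into the adjacency table via
 * mlxsw_sp_nexthop_group_refresh().
 */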
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001790static struct mlxsw_sp_nexthop_group *
1791mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1792{
1793 struct mlxsw_sp_nexthop_group *nh_grp;
1794 struct mlxsw_sp_nexthop *nh;
1795 struct fib_nh *fib_nh;
1796 size_t alloc_size;
1797 int i;
1798 int err;
1799
1800 alloc_size = sizeof(*nh_grp) +
1801 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1802 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1803 if (!nh_grp)
1804 return ERR_PTR(-ENOMEM);
1805 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001806 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001807 nh_grp->count = fi->fib_nhs;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001808 nh_grp->key.fi = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001809 for (i = 0; i < nh_grp->count; i++) {
1810 nh = &nh_grp->nexthops[i];
1811 fib_nh = &fi->fib_nh[i];
1812 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1813 if (err)
1814 goto err_nexthop_init;
1815 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001816 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
1817 if (err)
1818 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001819 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1820 return nh_grp;
1821
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001822err_nexthop_group_insert:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001823err_nexthop_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001824 for (i--; i >= 0; i--) {
1825 nh = &nh_grp->nexthops[i];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001826 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001827 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001828 kfree(nh_grp);
1829 return ERR_PTR(err);
1830}
1831
1832static void
1833mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1834 struct mlxsw_sp_nexthop_group *nh_grp)
1835{
1836 struct mlxsw_sp_nexthop *nh;
1837 int i;
1838
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001839 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001840 for (i = 0; i < nh_grp->count; i++) {
1841 nh = &nh_grp->nexthops[i];
1842 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1843 }
Ido Schimmel58312122016-12-23 09:32:50 +01001844 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1845 WARN_ON_ONCE(nh_grp->adj_index_valid);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001846 kfree(nh_grp);
1847}
1848
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001849static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1850 struct mlxsw_sp_fib_entry *fib_entry,
1851 struct fib_info *fi)
1852{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001853 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001854 struct mlxsw_sp_nexthop_group *nh_grp;
1855
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001856 key.fi = fi;
1857 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001858 if (!nh_grp) {
1859 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1860 if (IS_ERR(nh_grp))
1861 return PTR_ERR(nh_grp);
1862 }
1863 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1864 fib_entry->nh_group = nh_grp;
1865 return 0;
1866}
1867
1868static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1869 struct mlxsw_sp_fib_entry *fib_entry)
1870{
1871 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1872
1873 list_del(&fib_entry->nexthop_group_node);
1874 if (!list_empty(&nh_grp->fib_list))
1875 return;
1876 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1877}
1878
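/* A FIB entry may only be marked as offloaded if its nexthop group is
 * actually programmed in the device: a valid adjacency index for remote
 * (gateway) routes, or a RIF for directly connected ones. Entries with a
 * non-zero TOS are never offloaded.
 */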
Ido Schimmel013b20f2017-02-08 11:16:36 +01001879static bool
1880mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1881{
1882 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1883
Ido Schimmel9aecce12017-02-09 10:28:42 +01001884 if (fib_entry->params.tos)
1885 return false;
1886
Ido Schimmel013b20f2017-02-08 11:16:36 +01001887 switch (fib_entry->type) {
1888 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1889 return !!nh_group->adj_index_valid;
1890 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001891 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001892 default:
1893 return false;
1894 }
1895}
1896
1897static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1898{
1899 fib_entry->offloaded = true;
1900
Ido Schimmel76610eb2017-03-10 08:53:41 +01001901 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01001902 case MLXSW_SP_L3_PROTO_IPV4:
1903 fib_info_offload_inc(fib_entry->nh_group->key.fi);
1904 break;
1905 case MLXSW_SP_L3_PROTO_IPV6:
1906 WARN_ON_ONCE(1);
1907 }
1908}
1909
1910static void
1911mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1912{
Ido Schimmel76610eb2017-03-10 08:53:41 +01001913 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01001914 case MLXSW_SP_L3_PROTO_IPV4:
1915 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1916 break;
1917 case MLXSW_SP_L3_PROTO_IPV6:
1918 WARN_ON_ONCE(1);
1919 }
1920
1921 fib_entry->offloaded = false;
1922}
1923
1924static void
1925mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
1926 enum mlxsw_reg_ralue_op op, int err)
1927{
1928 switch (op) {
1929 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
1930 if (!fib_entry->offloaded)
1931 return;
1932 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
1933 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
1934 if (err)
1935 return;
1936 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1937 !fib_entry->offloaded)
1938 mlxsw_sp_fib_entry_offload_set(fib_entry);
1939 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1940 fib_entry->offloaded)
1941 mlxsw_sp_fib_entry_offload_unset(fib_entry);
1942 return;
1943 default:
1944 return;
1945 }
1946}
1947
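/* Write an IPv4 route that points at a nexthop group. When the group has a
 * valid adjacency index the route forwards through it; otherwise the route
 * is programmed to trap matching packets to the CPU.
 */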
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001948static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
1949 struct mlxsw_sp_fib_entry *fib_entry,
1950 enum mlxsw_reg_ralue_op op)
1951{
1952 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel76610eb2017-03-10 08:53:41 +01001953 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001954 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001955 enum mlxsw_reg_ralue_trap_action trap_action;
1956 u16 trap_id = 0;
1957 u32 adjacency_index = 0;
1958 u16 ecmp_size = 0;
1959
1960 /* In case the nexthop group adjacency index is valid, use it
 1961	 * with the provided ECMP size. Otherwise, set up a trap and pass
 1962	 * traffic to the kernel.
1963 */
Ido Schimmel4b411472017-02-08 11:16:37 +01001964 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001965 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
1966 adjacency_index = fib_entry->nh_group->adj_index;
1967 ecmp_size = fib_entry->nh_group->ecmp_size;
1968 } else {
1969 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1970 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
1971 }
1972
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001973 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001974 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
1975 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01001976 *p_dip);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001977 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
1978 adjacency_index, ecmp_size);
1979 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1980}
1981
Jiri Pirko61c503f2016-07-04 08:23:11 +02001982static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
1983 struct mlxsw_sp_fib_entry *fib_entry,
1984 enum mlxsw_reg_ralue_op op)
1985{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001986 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001987 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Ido Schimmel70ad3502017-02-08 11:16:38 +01001988 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001989 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel9aecce12017-02-09 10:28:42 +01001990 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Ido Schimmel70ad3502017-02-08 11:16:38 +01001991 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001992 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01001993
1994 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
1995 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001996 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01001997 } else {
1998 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1999 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2000 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02002001
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002002 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002003 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2004 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002005 *p_dip);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002006 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
2007 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002008 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2009}
2010
2011static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
2012 struct mlxsw_sp_fib_entry *fib_entry,
2013 enum mlxsw_reg_ralue_op op)
2014{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002015 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002016 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel9aecce12017-02-09 10:28:42 +01002017 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002018
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002019 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002020 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2021 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002022 *p_dip);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002023 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2024 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2025}
2026
2027static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
2028 struct mlxsw_sp_fib_entry *fib_entry,
2029 enum mlxsw_reg_ralue_op op)
2030{
2031 switch (fib_entry->type) {
2032 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002033 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002034 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2035 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
2036 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
2037 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
2038 }
2039 return -EINVAL;
2040}
2041
2042static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2043 struct mlxsw_sp_fib_entry *fib_entry,
2044 enum mlxsw_reg_ralue_op op)
2045{
Ido Schimmel013b20f2017-02-08 11:16:36 +01002046 int err = -EINVAL;
2047
Ido Schimmel76610eb2017-03-10 08:53:41 +01002048 switch (fib_entry->fib_node->fib->proto) {
Jiri Pirko61c503f2016-07-04 08:23:11 +02002049 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel013b20f2017-02-08 11:16:36 +01002050 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
2051 break;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002052 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel013b20f2017-02-08 11:16:36 +01002053 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002054 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01002055 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
2056 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002057}
2058
2059static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2060 struct mlxsw_sp_fib_entry *fib_entry)
2061{
Jiri Pirko7146da32016-09-01 10:37:41 +02002062 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2063 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002064}
2065
2066static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2067 struct mlxsw_sp_fib_entry *fib_entry)
2068{
2069 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2070 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2071}
2072
Jiri Pirko61c503f2016-07-04 08:23:11 +02002073static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01002074mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
2075 const struct fib_entry_notifier_info *fen_info,
2076 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002077{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002078 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002079
Ido Schimmel97989ee2017-03-10 08:53:38 +01002080 switch (fen_info->type) {
2081 case RTN_BROADCAST: /* fall through */
2082 case RTN_LOCAL:
Jiri Pirko61c503f2016-07-04 08:23:11 +02002083 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2084 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01002085 case RTN_UNREACHABLE: /* fall through */
2086 case RTN_BLACKHOLE: /* fall through */
2087 case RTN_PROHIBIT:
2088 /* Packets hitting these routes need to be trapped, but
2089 * can do so with a lower priority than packets directed
2090 * at the host, so use action type local instead of trap.
2091 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002092 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01002093 return 0;
2094 case RTN_UNICAST:
2095 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
2096 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2097 else
2098 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
2099 return 0;
2100 default:
2101 return -EINVAL;
2102 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002103}
2104
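/* Allocate a FIB entry for the notified IPv4 route, classify it (trap,
 * local or remote), bind it to a nexthop group and record the route
 * parameters (table ID, TOS, priority, type) used for ordering entries
 * within the FIB node.
 */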
Jiri Pirko5b004412016-09-01 10:37:40 +02002105static struct mlxsw_sp_fib_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01002106mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
2107 struct mlxsw_sp_fib_node *fib_node,
2108 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02002109{
2110 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002111 int err;
2112
2113 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
2114 if (!fib_entry) {
2115 err = -ENOMEM;
2116 goto err_fib_entry_alloc;
2117 }
2118
2119 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
2120 if (err)
2121 goto err_fib4_entry_type_set;
2122
2123 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
2124 if (err)
2125 goto err_nexthop_group_get;
2126
2127 fib_entry->params.prio = fen_info->fi->fib_priority;
2128 fib_entry->params.tb_id = fen_info->tb_id;
2129 fib_entry->params.type = fen_info->type;
2130 fib_entry->params.tos = fen_info->tos;
2131
2132 fib_entry->fib_node = fib_node;
2133
2134 return fib_entry;
2135
2136err_nexthop_group_get:
2137err_fib4_entry_type_set:
2138 kfree(fib_entry);
2139err_fib_entry_alloc:
2140 return ERR_PTR(err);
2141}
2142
2143static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2144 struct mlxsw_sp_fib_entry *fib_entry)
2145{
2146 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
2147 kfree(fib_entry);
2148}
2149
2150static struct mlxsw_sp_fib_node *
2151mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2152 const struct fib_entry_notifier_info *fen_info);
2153
2154static struct mlxsw_sp_fib_entry *
2155mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2156 const struct fib_entry_notifier_info *fen_info)
2157{
2158 struct mlxsw_sp_fib_entry *fib_entry;
2159 struct mlxsw_sp_fib_node *fib_node;
2160
2161 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2162 if (IS_ERR(fib_node))
2163 return NULL;
2164
2165 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2166 if (fib_entry->params.tb_id == fen_info->tb_id &&
2167 fib_entry->params.tos == fen_info->tos &&
2168 fib_entry->params.type == fen_info->type &&
2169 fib_entry->nh_group->key.fi == fen_info->fi) {
2170 return fib_entry;
2171 }
2172 }
2173
2174 return NULL;
2175}
2176
2177static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
2178 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
2179 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
2180 .key_len = sizeof(struct mlxsw_sp_fib_key),
2181 .automatic_shrinking = true,
2182};
2183
2184static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2185 struct mlxsw_sp_fib_node *fib_node)
2186{
2187 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2188 mlxsw_sp_fib_ht_params);
2189}
2190
2191static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2192 struct mlxsw_sp_fib_node *fib_node)
2193{
2194 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2195 mlxsw_sp_fib_ht_params);
2196}
2197
2198static struct mlxsw_sp_fib_node *
2199mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2200 size_t addr_len, unsigned char prefix_len)
2201{
2202 struct mlxsw_sp_fib_key key;
2203
2204 memset(&key, 0, sizeof(key));
2205 memcpy(key.addr, addr, addr_len);
2206 key.prefix_len = prefix_len;
2207 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2208}
2209
2210static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01002211mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002212 size_t addr_len, unsigned char prefix_len)
2213{
2214 struct mlxsw_sp_fib_node *fib_node;
2215
2216 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2217 if (!fib_node)
2218 return NULL;
2219
2220 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002221 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002222 memcpy(fib_node->key.addr, addr, addr_len);
2223 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002224
2225 return fib_node;
2226}
2227
2228static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2229{
Ido Schimmel9aecce12017-02-09 10:28:42 +01002230 list_del(&fib_node->list);
2231 WARN_ON(!list_empty(&fib_node->entry_list));
2232 kfree(fib_node);
2233}
2234
2235static bool
2236mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2237 const struct mlxsw_sp_fib_entry *fib_entry)
2238{
2239 return list_first_entry(&fib_node->entry_list,
2240 struct mlxsw_sp_fib_entry, list) == fib_entry;
2241}
2242
2243static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2244{
2245 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002246 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002247
2248 if (fib->prefix_ref_count[prefix_len]++ == 0)
2249 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2250}
2251
2252static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2253{
2254 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002255 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002256
2257 if (--fib->prefix_ref_count[prefix_len] == 0)
2258 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2259}
2260
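/* Link a FIB node to its FIB: insert it into the FIB's hash table and make
 * sure the virtual router is bound to an LPM tree whose prefix usage
 * covers the node's prefix length, either by re-checking the current tree
 * or by binding a new one.
 */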
Ido Schimmel76610eb2017-03-10 08:53:41 +01002261static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2262 struct mlxsw_sp_fib_node *fib_node,
2263 struct mlxsw_sp_fib *fib)
2264{
2265 struct mlxsw_sp_prefix_usage req_prefix_usage;
2266 struct mlxsw_sp_lpm_tree *lpm_tree;
2267 int err;
2268
2269 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2270 if (err)
2271 return err;
2272 fib_node->fib = fib;
2273
2274 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2275 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2276
2277 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2278 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2279 &req_prefix_usage);
2280 if (err)
2281 goto err_tree_check;
2282 } else {
2283 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2284 fib->proto);
2285 if (IS_ERR(lpm_tree))
2286 return PTR_ERR(lpm_tree);
2287 fib->lpm_tree = lpm_tree;
2288 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2289 if (err)
2290 goto err_tree_bind;
2291 }
2292
2293 mlxsw_sp_fib_node_prefix_inc(fib_node);
2294
2295 return 0;
2296
2297err_tree_bind:
2298 fib->lpm_tree = NULL;
2299 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2300err_tree_check:
2301 fib_node->fib = NULL;
2302 mlxsw_sp_fib_node_remove(fib, fib_node);
2303 return err;
2304}
2305
2306static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2307 struct mlxsw_sp_fib_node *fib_node)
2308{
2309 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
2310 struct mlxsw_sp_fib *fib = fib_node->fib;
2311
2312 mlxsw_sp_fib_node_prefix_dec(fib_node);
2313
2314 if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2315 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2316 fib->lpm_tree = NULL;
2317 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2318 } else {
2319 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
2320 }
2321
2322 fib_node->fib = NULL;
2323 mlxsw_sp_fib_node_remove(fib, fib_node);
2324}
2325
Ido Schimmel9aecce12017-02-09 10:28:42 +01002326static struct mlxsw_sp_fib_node *
2327mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2328 const struct fib_entry_notifier_info *fen_info)
2329{
2330 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002331 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02002332 struct mlxsw_sp_vr *vr;
2333 int err;
2334
Ido Schimmel76610eb2017-03-10 08:53:41 +01002335 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
Jiri Pirko5b004412016-09-01 10:37:40 +02002336 if (IS_ERR(vr))
2337 return ERR_CAST(vr);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002338 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirko5b004412016-09-01 10:37:40 +02002339
Ido Schimmel76610eb2017-03-10 08:53:41 +01002340 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002341 sizeof(fen_info->dst),
2342 fen_info->dst_len);
2343 if (fib_node)
2344 return fib_node;
2345
Ido Schimmel76610eb2017-03-10 08:53:41 +01002346 fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002347 sizeof(fen_info->dst),
2348 fen_info->dst_len);
2349 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02002350 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002351 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02002352 }
Jiri Pirko5b004412016-09-01 10:37:40 +02002353
Ido Schimmel76610eb2017-03-10 08:53:41 +01002354 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2355 if (err)
2356 goto err_fib_node_init;
2357
Ido Schimmel9aecce12017-02-09 10:28:42 +01002358 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02002359
Ido Schimmel76610eb2017-03-10 08:53:41 +01002360err_fib_node_init:
2361 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002362err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01002363 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02002364 return ERR_PTR(err);
2365}
2366
Ido Schimmel9aecce12017-02-09 10:28:42 +01002367static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
2368 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02002369{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002370 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02002371
Ido Schimmel9aecce12017-02-09 10:28:42 +01002372 if (!list_empty(&fib_node->entry_list))
2373 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002374 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002375 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002376 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02002377}
2378
Ido Schimmel9aecce12017-02-09 10:28:42 +01002379static struct mlxsw_sp_fib_entry *
2380mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2381 const struct mlxsw_sp_fib_entry_params *params)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002382{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002383 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002384
2385 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2386 if (fib_entry->params.tb_id > params->tb_id)
2387 continue;
2388 if (fib_entry->params.tb_id != params->tb_id)
2389 break;
2390 if (fib_entry->params.tos > params->tos)
2391 continue;
2392 if (fib_entry->params.prio >= params->prio ||
2393 fib_entry->params.tos < params->tos)
2394 return fib_entry;
2395 }
2396
2397 return NULL;
2398}
2399
Ido Schimmel4283bce2017-02-09 10:28:43 +01002400static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
2401 struct mlxsw_sp_fib_entry *new_entry)
2402{
2403 struct mlxsw_sp_fib_node *fib_node;
2404
2405 if (WARN_ON(!fib_entry))
2406 return -EINVAL;
2407
2408 fib_node = fib_entry->fib_node;
2409 list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
2410 if (fib_entry->params.tb_id != new_entry->params.tb_id ||
2411 fib_entry->params.tos != new_entry->params.tos ||
2412 fib_entry->params.prio != new_entry->params.prio)
2413 break;
2414 }
2415
2416 list_add_tail(&new_entry->list, &fib_entry->list);
2417 return 0;
2418}
2419
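/* Insert a new entry into the node's entry list, which is kept ordered by
 * table ID, TOS and priority. An appended entry is placed after the last
 * entry with identical parameters; a replacing entry is placed right
 * before the entry it replaces, so that mlxsw_sp_fib4_entry_replace() can
 * later remove the old one.
 */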
Ido Schimmel9aecce12017-02-09 10:28:42 +01002420static int
2421mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002422 struct mlxsw_sp_fib_entry *new_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002423 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002424{
2425 struct mlxsw_sp_fib_entry *fib_entry;
2426
2427 fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
2428
Ido Schimmel4283bce2017-02-09 10:28:43 +01002429 if (append)
2430 return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002431 if (replace && WARN_ON(!fib_entry))
2432 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01002433
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002434	/* Insert the new entry before the replaced one, so that we can
 2435	 * later remove the latter.
2436 */
Ido Schimmel9aecce12017-02-09 10:28:42 +01002437 if (fib_entry) {
2438 list_add_tail(&new_entry->list, &fib_entry->list);
2439 } else {
2440 struct mlxsw_sp_fib_entry *last;
2441
2442 list_for_each_entry(last, &fib_node->entry_list, list) {
2443 if (new_entry->params.tb_id > last->params.tb_id)
2444 break;
2445 fib_entry = last;
2446 }
2447
2448 if (fib_entry)
2449 list_add(&new_entry->list, &fib_entry->list);
2450 else
2451 list_add(&new_entry->list, &fib_node->entry_list);
2452 }
2453
2454 return 0;
2455}
2456
2457static void
2458mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
2459{
2460 list_del(&fib_entry->list);
2461}
2462
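/* Program the entry into the device only when it is the first (preferred)
 * entry of its FIB node. If another entry was previously programmed for
 * this prefix, its offload flag is cleared and the new entry overwrites it
 * in place (a write, not a delete followed by an add) to prevent packet
 * loss.
 */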
2463static int
2464mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
2465 const struct mlxsw_sp_fib_node *fib_node,
2466 struct mlxsw_sp_fib_entry *fib_entry)
2467{
2468 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2469 return 0;
2470
2471 /* To prevent packet loss, overwrite the previously offloaded
2472 * entry.
2473 */
2474 if (!list_is_singular(&fib_node->entry_list)) {
2475 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2476 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2477
2478 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
2479 }
2480
2481 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2482}
2483
2484static void
2485mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
2486 const struct mlxsw_sp_fib_node *fib_node,
2487 struct mlxsw_sp_fib_entry *fib_entry)
2488{
2489 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2490 return;
2491
2492 /* Promote the next entry by overwriting the deleted entry */
2493 if (!list_is_singular(&fib_node->entry_list)) {
2494 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2495 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2496
2497 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
2498 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2499 return;
2500 }
2501
2502 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2503}
2504
2505static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002506 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002507 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002508{
2509 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2510 int err;
2511
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002512 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2513 append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002514 if (err)
2515 return err;
2516
2517 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2518 if (err)
2519 goto err_fib4_node_entry_add;
2520
Ido Schimmel9aecce12017-02-09 10:28:42 +01002521 return 0;
2522
2523err_fib4_node_entry_add:
2524 mlxsw_sp_fib4_node_list_remove(fib_entry);
2525 return err;
2526}
2527
2528static void
2529mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2530 struct mlxsw_sp_fib_entry *fib_entry)
2531{
2532 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2533
Ido Schimmel9aecce12017-02-09 10:28:42 +01002534 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
2535 mlxsw_sp_fib4_node_list_remove(fib_entry);
2536}
2537
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002538static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2539 struct mlxsw_sp_fib_entry *fib_entry,
2540 bool replace)
2541{
2542 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2543 struct mlxsw_sp_fib_entry *replaced;
2544
2545 if (!replace)
2546 return;
2547
 2548	/* We inserted the new entry before the replaced one */
2549 replaced = list_next_entry(fib_entry, list);
2550
2551 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2552 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2553 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2554}
2555
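/* Add an IPv4 route in response to a FIB notification: get (or create) the
 * FIB node for the prefix, create the entry, link it into the node and, in
 * the replace case, remove the entry it replaced. Nothing is done once the
 * router has aborted FIB offload.
 */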
Ido Schimmel9aecce12017-02-09 10:28:42 +01002556static int
2557mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002558 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002559 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002560{
2561 struct mlxsw_sp_fib_entry *fib_entry;
2562 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002563 int err;
2564
Ido Schimmel9011b672017-05-16 19:38:25 +02002565 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002566 return 0;
2567
Ido Schimmel9aecce12017-02-09 10:28:42 +01002568 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2569 if (IS_ERR(fib_node)) {
2570 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
2571 return PTR_ERR(fib_node);
2572 }
2573
2574 fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002575 if (IS_ERR(fib_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002576 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
2577 err = PTR_ERR(fib_entry);
2578 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002579 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02002580
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002581 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
2582 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002583 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002584 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
2585 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002586 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01002587
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002588 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
2589
Jiri Pirko61c503f2016-07-04 08:23:11 +02002590 return 0;
2591
Ido Schimmel9aecce12017-02-09 10:28:42 +01002592err_fib4_node_entry_link:
2593 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2594err_fib4_entry_create:
2595 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002596 return err;
2597}
2598
Jiri Pirko37956d72016-10-20 16:05:43 +02002599static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2600 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002601{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002602 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002603 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002604
Ido Schimmel9011b672017-05-16 19:38:25 +02002605 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02002606 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002607
Ido Schimmel9aecce12017-02-09 10:28:42 +01002608 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2609 if (WARN_ON(!fib_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02002610 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002611 fib_node = fib_entry->fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02002612
Ido Schimmel9aecce12017-02-09 10:28:42 +01002613 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2614 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2615 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002616}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002617
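/* When FIB offload is aborted, install a default (/0) route with an ip2me
 * action in every active virtual router, bound to the minimal LPM tree, so
 * that all routed traffic is trapped to the CPU and handled by the kernel.
 */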
2618static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2619{
2620 char ralta_pl[MLXSW_REG_RALTA_LEN];
2621 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002622 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002623
2624 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
2625 MLXSW_SP_LPM_TREE_MIN);
2626 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
2627 if (err)
2628 return err;
2629
2630 mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
2631 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
2632 if (err)
2633 return err;
2634
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002635 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02002636 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002637 char raltb_pl[MLXSW_REG_RALTB_LEN];
2638 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002639
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002640 if (!mlxsw_sp_vr_is_used(vr))
2641 continue;
2642
2643 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
2644 MLXSW_REG_RALXX_PROTOCOL_IPV4,
2645 MLXSW_SP_LPM_TREE_MIN);
2646 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
2647 raltb_pl);
2648 if (err)
2649 return err;
2650
2651 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
2652 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
2653 0);
2654 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2655 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
2656 ralue_pl);
2657 if (err)
2658 return err;
2659 }
2660
2661 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002662}
2663
Ido Schimmel9aecce12017-02-09 10:28:42 +01002664static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2665 struct mlxsw_sp_fib_node *fib_node)
2666{
2667 struct mlxsw_sp_fib_entry *fib_entry, *tmp;
2668
2669 list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
2670 bool do_break = &tmp->list == &fib_node->entry_list;
2671
2672 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2673 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2674 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2675 /* Break when entry list is empty and node was freed.
2676 * Otherwise, we'll access freed memory in the next
2677 * iteration.
2678 */
2679 if (do_break)
2680 break;
2681 }
2682}
2683
2684static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2685 struct mlxsw_sp_fib_node *fib_node)
2686{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002687 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002688 case MLXSW_SP_L3_PROTO_IPV4:
2689 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2690 break;
2691 case MLXSW_SP_L3_PROTO_IPV6:
2692 WARN_ON_ONCE(1);
2693 break;
2694 }
2695}
2696
Ido Schimmel76610eb2017-03-10 08:53:41 +01002697static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2698 struct mlxsw_sp_vr *vr,
2699 enum mlxsw_sp_l3proto proto)
2700{
2701 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
2702 struct mlxsw_sp_fib_node *fib_node, *tmp;
2703
2704 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2705 bool do_break = &tmp->list == &fib->node_list;
2706
2707 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2708 if (do_break)
2709 break;
2710 }
2711}
2712
Ido Schimmelac571de2016-11-14 11:26:32 +01002713static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002714{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002715 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002716
Jiri Pirkoc1a38312016-10-21 16:07:23 +02002717 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02002718 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01002719
Ido Schimmel76610eb2017-03-10 08:53:41 +01002720 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002721 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002722 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002723 }
Ido Schimmelac571de2016-11-14 11:26:32 +01002724}
2725
2726static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
2727{
2728 int err;
2729
Ido Schimmel9011b672017-05-16 19:38:25 +02002730 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01002731 return;
2732 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01002733 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02002734 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002735 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
2736 if (err)
2737 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2738}
2739
Ido Schimmel30572242016-12-03 16:45:01 +01002740struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01002741 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01002742 union {
2743 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002744 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01002745 struct fib_nh_notifier_info fnh_info;
2746 };
Ido Schimmel30572242016-12-03 16:45:01 +01002747 struct mlxsw_sp *mlxsw_sp;
2748 unsigned long event;
2749};
2750
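/* FIB notifications arrive in atomic context, so the notifier only copies
 * the event information into a work item and the actual processing happens
 * here, under RTNL. Failure to offload a route triggers the FIB abort
 * mechanism.
 */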
2751static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002752{
Ido Schimmel30572242016-12-03 16:45:01 +01002753 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01002754 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01002755 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002756 struct fib_rule *rule;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002757 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002758 int err;
2759
Ido Schimmel30572242016-12-03 16:45:01 +01002760 /* Protect internal structures from changes */
2761 rtnl_lock();
2762 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002763 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01002764 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002765 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002766 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01002767 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
2768 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002769 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002770 if (err)
2771 mlxsw_sp_router_fib4_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01002772 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002773 break;
2774 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01002775 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
2776 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002777 break;
2778 case FIB_EVENT_RULE_ADD: /* fall through */
2779 case FIB_EVENT_RULE_DEL:
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002780 rule = fib_work->fr_info.rule;
Ido Schimmelc7f6e662017-03-16 09:08:20 +01002781 if (!fib4_rule_default(rule) && !rule->l3mdev)
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002782 mlxsw_sp_router_fib4_abort(mlxsw_sp);
2783 fib_rule_put(rule);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002784 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01002785 case FIB_EVENT_NH_ADD: /* fall through */
2786 case FIB_EVENT_NH_DEL:
2787 mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
2788 fib_work->fnh_info.fib_nh);
2789 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
2790 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002791 }
Ido Schimmel30572242016-12-03 16:45:01 +01002792 rtnl_unlock();
2793 kfree(fib_work);
2794}
2795
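/* FIB notifier callback. The event data is copied into a work item
 * (allocated with GFP_ATOMIC) and processed asynchronously by
 * mlxsw_sp_router_fib_event_work(); references on fib_info / fib_rule
 * objects are taken here so they remain valid while the work is queued.
 */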
2796/* Called with rcu_read_lock() */
2797static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
2798 unsigned long event, void *ptr)
2799{
2800 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
2801 struct mlxsw_sp_fib_event_work *fib_work;
2802 struct fib_notifier_info *info = ptr;
2803
2804 if (!net_eq(info->net, &init_net))
2805 return NOTIFY_DONE;
2806
2807 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
2808 if (WARN_ON(!fib_work))
2809 return NOTIFY_BAD;
2810
Ido Schimmela0e47612017-02-06 16:20:10 +01002811 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
Ido Schimmel30572242016-12-03 16:45:01 +01002812 fib_work->mlxsw_sp = mlxsw_sp;
2813 fib_work->event = event;
2814
2815 switch (event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002816 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01002817 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Ido Schimmel30572242016-12-03 16:45:01 +01002818 case FIB_EVENT_ENTRY_ADD: /* fall through */
2819 case FIB_EVENT_ENTRY_DEL:
2820 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
2821		/* Take reference on fib_info to prevent it from being
2822 * freed while work is queued. Release it afterwards.
2823 */
2824 fib_info_hold(fib_work->fen_info.fi);
2825 break;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002826 case FIB_EVENT_RULE_ADD: /* fall through */
2827 case FIB_EVENT_RULE_DEL:
2828 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
2829 fib_rule_get(fib_work->fr_info.rule);
2830 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01002831 case FIB_EVENT_NH_ADD: /* fall through */
2832 case FIB_EVENT_NH_DEL:
2833 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
2834 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
2835 break;
Ido Schimmel30572242016-12-03 16:45:01 +01002836 }
2837
Ido Schimmela0e47612017-02-06 16:20:10 +01002838 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01002839
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002840 return NOTIFY_DONE;
2841}
2842
Ido Schimmel4724ba562017-03-10 08:53:39 +01002843static struct mlxsw_sp_rif *
2844mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2845 const struct net_device *dev)
2846{
2847 int i;
2848
2849 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002850 if (mlxsw_sp->router->rifs[i] &&
2851 mlxsw_sp->router->rifs[i]->dev == dev)
2852 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01002853
2854 return NULL;
2855}
2856
2857static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
2858{
2859 char ritr_pl[MLXSW_REG_RITR_LEN];
2860 int err;
2861
2862 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
2863 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2864 if (WARN_ON_ONCE(err))
2865 return err;
2866
2867 mlxsw_reg_ritr_enable_set(ritr_pl, false);
2868 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2869}
2870
2871static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002872 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002873{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002874 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
2875 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
2876 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002877}
2878
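/* Decide whether an inetaddr event should (de)configure a RIF:
 * NETDEV_UP creates a RIF only if the netdev does not already have one,
 * NETDEV_DOWN destroys it only once the last IPv4 address is gone and the
 * netdev is not enslaved to an L3 master device.
 */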
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002879static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002880 const struct in_device *in_dev,
2881 unsigned long event)
2882{
2883 switch (event) {
2884 case NETDEV_UP:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002885 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002886 return true;
2887 return false;
2888 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002889 if (rif && !in_dev->ifa_list &&
2890 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01002891 return true;
2892 /* It is possible we already removed the RIF ourselves
2893 * if it was assigned to a netdev that is now a bridge
2894 * or LAG slave.
2895 */
2896 return false;
2897 }
2898
2899 return false;
2900}
2901
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002902#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
Ido Schimmel4724ba562017-03-10 08:53:39 +01002903static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2904{
2905 int i;
2906
2907 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002908 if (!mlxsw_sp->router->rifs[i])
Ido Schimmel4724ba562017-03-10 08:53:39 +01002909 return i;
2910
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002911 return MLXSW_SP_INVALID_INDEX_RIF;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002912}
2913
2914static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2915 bool *p_lagged, u16 *p_system_port)
2916{
2917 u8 local_port = mlxsw_sp_vport->local_port;
2918
2919 *p_lagged = mlxsw_sp_vport->lagged;
2920 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2921}
2922
2923static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel69132292017-03-10 08:53:42 +01002924 u16 vr_id, struct net_device *l3_dev,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002925 u16 rif_index, bool create)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002926{
2927 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2928 bool lagged = mlxsw_sp_vport->lagged;
2929 char ritr_pl[MLXSW_REG_RITR_LEN];
2930 u16 system_port;
2931
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002932 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
2933 vr_id, l3_dev->mtu, l3_dev->dev_addr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002934
2935 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2936 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2937 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2938
2939 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2940}
2941
2942static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2943
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002944static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002945{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002946 return MLXSW_SP_RFID_BASE + rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002947}
2948
2949static struct mlxsw_sp_fid *
2950mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2951{
2952 struct mlxsw_sp_fid *f;
2953
2954 f = kzalloc(sizeof(*f), GFP_KERNEL);
2955 if (!f)
2956 return NULL;
2957
2958 f->leave = mlxsw_sp_vport_rif_sp_leave;
2959 f->ref_count = 0;
2960 f->dev = l3_dev;
2961 f->fid = fid;
2962
2963 return f;
2964}
2965
2966static struct mlxsw_sp_rif *
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002967mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
Ido Schimmel69132292017-03-10 08:53:42 +01002968 struct mlxsw_sp_fid *f)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002969{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002970 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002971
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002972 rif = kzalloc(sizeof(*rif), GFP_KERNEL);
2973 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002974 return NULL;
2975
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002976 INIT_LIST_HEAD(&rif->nexthop_list);
2977 INIT_LIST_HEAD(&rif->neigh_list);
2978 ether_addr_copy(rif->addr, l3_dev->dev_addr);
2979 rif->mtu = l3_dev->mtu;
2980 rif->vr_id = vr_id;
2981 rif->dev = l3_dev;
2982 rif->rif_index = rif_index;
2983 rif->f = f;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002984
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002985 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002986}
2987
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002988struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
2989 u16 rif_index)
2990{
2991 return mlxsw_sp->router->rifs[rif_index];
2992}
2993
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02002994u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
2995{
2996 return rif->rif_index;
2997}
2998
2999int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
3000{
3001 return rif->dev->ifindex;
3002}
3003
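/* Create a Sub-port RIF for a vPort: reserve a free RIF index, bind it to
 * the virtual router of the L3 device's FIB table (l3mdev table or
 * RT_TABLE_MAIN), program the RITR register, install an FDB entry for the
 * router MAC and allocate the backing rFID. An egress counter is also
 * allocated if counters are enabled for the eRIF dpipe table.
 */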
Ido Schimmel4724ba562017-03-10 08:53:39 +01003004static struct mlxsw_sp_rif *
3005mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
3006 struct net_device *l3_dev)
3007{
3008 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Ido Schimmel57837882017-03-16 09:08:16 +01003009 u32 tb_id = l3mdev_fib_table(l3_dev);
Ido Schimmel69132292017-03-10 08:53:42 +01003010 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003011 struct mlxsw_sp_fid *f;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003012 struct mlxsw_sp_rif *rif;
3013 u16 fid, rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003014 int err;
3015
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003016 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
3017 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003018 return ERR_PTR(-ERANGE);
3019
Ido Schimmel57837882017-03-16 09:08:16 +01003020 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
Ido Schimmel69132292017-03-10 08:53:42 +01003021 if (IS_ERR(vr))
3022 return ERR_CAST(vr);
3023
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003024 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
3025 rif_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003026 if (err)
Ido Schimmel69132292017-03-10 08:53:42 +01003027 goto err_vport_rif_sp_op;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003028
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003029 fid = mlxsw_sp_rif_sp_to_fid(rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003030 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
3031 if (err)
3032 goto err_rif_fdb_op;
3033
3034 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
3035 if (!f) {
3036 err = -ENOMEM;
3037 goto err_rfid_alloc;
3038 }
3039
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003040 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
3041 if (!rif) {
Ido Schimmel4724ba562017-03-10 08:53:39 +01003042 err = -ENOMEM;
3043 goto err_rif_alloc;
3044 }
3045
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02003046 if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
3047 MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
3048 err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
3049 MLXSW_SP_RIF_COUNTER_EGRESS);
3050 if (err)
3051 netdev_dbg(mlxsw_sp_vport->dev,
3052				   "Counter alloc failed: err=%d\n", err);
3053 }
3054
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003055 f->rif = rif;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003056 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel69132292017-03-10 08:53:42 +01003057 vr->rif_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003058
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003059 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003060
3061err_rif_alloc:
3062 kfree(f);
3063err_rfid_alloc:
3064 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3065err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003066 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
3067 false);
Ido Schimmel69132292017-03-10 08:53:42 +01003068err_vport_rif_sp_op:
3069 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003070 return ERR_PTR(err);
3071}
3072
3073static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003074 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003075{
3076 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Ido Schimmel9011b672017-05-16 19:38:25 +02003077 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003078 struct net_device *l3_dev = rif->dev;
3079 struct mlxsw_sp_fid *f = rif->f;
3080 u16 rif_index = rif->rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003081 u16 fid = f->fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003082
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003083 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003084
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02003085 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
3086 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
3087
Ido Schimmel69132292017-03-10 08:53:42 +01003088 vr->rif_count--;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003089 mlxsw_sp->router->rifs[rif_index] = NULL;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003090 f->rif = NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003091
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003092 kfree(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003093
3094 kfree(f);
3095
3096 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3097
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003098 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
3099 false);
Ido Schimmel69132292017-03-10 08:53:42 +01003100 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003101}
3102
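/* Bind a vPort to the RIF of the given L3 device, creating the RIF on
 * first use. The rFID's reference count tracks the number of joined
 * vPorts.
 */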
3103static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3104 struct net_device *l3_dev)
3105{
3106 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003107 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003108
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003109 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3110 if (!rif) {
3111 rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
3112 if (IS_ERR(rif))
3113 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003114 }
3115
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003116 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
3117 rif->f->ref_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003118
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003119 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003120
3121 return 0;
3122}
3123
3124static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3125{
3126 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3127
3128 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
3129
3130 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
3131 if (--f->ref_count == 0)
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003132 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003133}
3134
3135static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
3136 struct net_device *port_dev,
3137 unsigned long event, u16 vid)
3138{
3139 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
3140 struct mlxsw_sp_port *mlxsw_sp_vport;
3141
3142 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3143 if (WARN_ON(!mlxsw_sp_vport))
3144 return -EINVAL;
3145
3146 switch (event) {
3147 case NETDEV_UP:
3148 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
3149 case NETDEV_DOWN:
3150 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3151 break;
3152 }
3153
3154 return 0;
3155}
3156
3157static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3158 unsigned long event)
3159{
Jiri Pirko2b94e582017-04-18 16:55:37 +02003160 if (netif_is_bridge_port(port_dev) ||
3161 netif_is_lag_port(port_dev) ||
3162 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003163 return 0;
3164
3165 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3166}
3167
3168static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3169 struct net_device *lag_dev,
3170 unsigned long event, u16 vid)
3171{
3172 struct net_device *port_dev;
3173 struct list_head *iter;
3174 int err;
3175
3176 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3177 if (mlxsw_sp_port_dev_check(port_dev)) {
3178 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
3179 event, vid);
3180 if (err)
3181 return err;
3182 }
3183 }
3184
3185 return 0;
3186}
3187
3188static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
3189 unsigned long event)
3190{
3191 if (netif_is_bridge_port(lag_dev))
3192 return 0;
3193
3194 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
3195}
3196
3197static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
3198 struct net_device *l3_dev)
3199{
3200 u16 fid;
3201
3202 if (is_vlan_dev(l3_dev))
3203 fid = vlan_dev_vlan_id(l3_dev);
Ido Schimmel5f6935c2017-05-16 19:38:26 +02003204 else if (mlxsw_sp_master_bridge(mlxsw_sp)->dev == l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003205 fid = 1;
3206 else
3207 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
3208
3209 return mlxsw_sp_fid_find(mlxsw_sp, fid);
3210}
3211
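/* The "router port" is a virtual port one above the last possible
 * physical port. It is added as a member to FID flood tables so that
 * flooded traffic also reaches the router.
 */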
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003212static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
3213{
3214 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
3215}
3216
Ido Schimmel4724ba562017-03-10 08:53:39 +01003217static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
3218{
3219 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
3220 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3221}
3222
3223static u16 mlxsw_sp_flood_table_index_get(u16 fid)
3224{
3225 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
3226}
3227
3228static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
3229 bool set)
3230{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003231 u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003232 enum mlxsw_flood_table_type table_type;
3233 char *sftr_pl;
3234 u16 index;
3235 int err;
3236
3237 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
3238 if (!sftr_pl)
3239 return -ENOMEM;
3240
3241 table_type = mlxsw_sp_flood_table_type_get(fid);
3242 index = mlxsw_sp_flood_table_index_get(fid);
3243 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003244 1, router_port, set);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003245 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
3246
3247 kfree(sftr_pl);
3248 return err;
3249}
3250
3251static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3252{
3253 if (mlxsw_sp_fid_is_vfid(fid))
3254 return MLXSW_REG_RITR_FID_IF;
3255 else
3256 return MLXSW_REG_RITR_VLAN_IF;
3257}
3258
Ido Schimmel69132292017-03-10 08:53:42 +01003259static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003260 struct net_device *l3_dev,
3261 u16 fid, u16 rif,
3262 bool create)
3263{
3264 enum mlxsw_reg_ritr_if_type rif_type;
3265 char ritr_pl[MLXSW_REG_RITR_LEN];
3266
3267 rif_type = mlxsw_sp_rif_type_get(fid);
Ido Schimmel69132292017-03-10 08:53:42 +01003268 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003269 l3_dev->dev_addr);
3270 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3271
3272 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3273}
3274
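/* Create a FID-based RIF for a bridge or a VLAN device on top of it:
 * reserve a RIF index, bind it to the virtual router of the device's FIB
 * table, add the router port to the FID's flood table, program the RITR
 * register and install an FDB entry for the router MAC.
 */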
3275static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
3276 struct net_device *l3_dev,
3277 struct mlxsw_sp_fid *f)
3278{
Ido Schimmel57837882017-03-16 09:08:16 +01003279 u32 tb_id = l3mdev_fib_table(l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003280 struct mlxsw_sp_rif *rif;
Ido Schimmel69132292017-03-10 08:53:42 +01003281 struct mlxsw_sp_vr *vr;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003282 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003283 int err;
3284
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003285 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
3286 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003287 return -ERANGE;
3288
Ido Schimmel57837882017-03-16 09:08:16 +01003289 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
Ido Schimmel69132292017-03-10 08:53:42 +01003290 if (IS_ERR(vr))
3291 return PTR_ERR(vr);
3292
Ido Schimmel4724ba562017-03-10 08:53:39 +01003293 err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
3294 if (err)
Ido Schimmel69132292017-03-10 08:53:42 +01003295 goto err_port_flood_set;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003296
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003297 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
3298 rif_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003299 if (err)
3300 goto err_rif_bridge_op;
3301
3302 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
3303 if (err)
3304 goto err_rif_fdb_op;
3305
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003306 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
3307 if (!rif) {
Ido Schimmel4724ba562017-03-10 08:53:39 +01003308 err = -ENOMEM;
3309 goto err_rif_alloc;
3310 }
3311
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003312 f->rif = rif;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003313 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel69132292017-03-10 08:53:42 +01003314 vr->rif_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003315
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003316 netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003317
3318 return 0;
3319
3320err_rif_alloc:
3321 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3322err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003323 mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
3324 false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003325err_rif_bridge_op:
3326 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
Ido Schimmel69132292017-03-10 08:53:42 +01003327err_port_flood_set:
3328 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003329 return err;
3330}
3331
3332void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003333 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003334{
Ido Schimmel9011b672017-05-16 19:38:25 +02003335 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003336 struct net_device *l3_dev = rif->dev;
3337 struct mlxsw_sp_fid *f = rif->f;
3338 u16 rif_index = rif->rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003339
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003340 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003341
Ido Schimmel69132292017-03-10 08:53:42 +01003342 vr->rif_count--;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003343 mlxsw_sp->router->rifs[rif_index] = NULL;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003344 f->rif = NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003345
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003346 kfree(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003347
3348 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3349
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003350 mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
3351 false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003352
3353 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
3354
Ido Schimmel69132292017-03-10 08:53:42 +01003355 mlxsw_sp_vr_put(vr);
3356
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003357 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003358}
3359
3360static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3361 struct net_device *br_dev,
3362 unsigned long event)
3363{
3364 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3365 struct mlxsw_sp_fid *f;
3366
3367 /* FID can either be an actual FID if the L3 device is the
3368 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3369 * L3 device is a VLAN-unaware bridge and we get a vFID.
3370 */
3371 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3372 if (WARN_ON(!f))
3373 return -EINVAL;
3374
3375 switch (event) {
3376 case NETDEV_UP:
3377 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3378 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003379 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003380 break;
3381 }
3382
3383 return 0;
3384}
3385
3386static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3387 unsigned long event)
3388{
3389 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3390 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3391 u16 vid = vlan_dev_vlan_id(vlan_dev);
3392
3393 if (mlxsw_sp_port_dev_check(real_dev))
3394 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3395 vid);
3396 else if (netif_is_lag_master(real_dev))
3397 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3398 vid);
3399 else if (netif_is_bridge_master(real_dev) &&
Ido Schimmel5f6935c2017-05-16 19:38:26 +02003400 mlxsw_sp_master_bridge(mlxsw_sp)->dev == real_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003401 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3402 event);
3403
3404 return 0;
3405}
3406
Ido Schimmelb1e45522017-04-30 19:47:14 +03003407static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
3408 unsigned long event)
3409{
3410 if (mlxsw_sp_port_dev_check(dev))
3411 return mlxsw_sp_inetaddr_port_event(dev, event);
3412 else if (netif_is_lag_master(dev))
3413 return mlxsw_sp_inetaddr_lag_event(dev, event);
3414 else if (netif_is_bridge_master(dev))
3415 return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3416 else if (is_vlan_dev(dev))
3417 return mlxsw_sp_inetaddr_vlan_event(dev, event);
3418 else
3419 return 0;
3420}
3421
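/* inetaddr notifier. Only netdevs that should gain or lose a RIF as a
 * result of the address change are of interest; the event is dispatched
 * according to the netdev type (port, LAG, bridge or VLAN device).
 */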
Ido Schimmel4724ba562017-03-10 08:53:39 +01003422int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3423 unsigned long event, void *ptr)
3424{
3425 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3426 struct net_device *dev = ifa->ifa_dev->dev;
3427 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003428 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003429 int err = 0;
3430
3431 mlxsw_sp = mlxsw_sp_lower_get(dev);
3432 if (!mlxsw_sp)
3433 goto out;
3434
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003435 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3436 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003437 goto out;
3438
Ido Schimmelb1e45522017-04-30 19:47:14 +03003439 err = __mlxsw_sp_inetaddr_event(dev, event);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003440out:
3441 return notifier_from_errno(err);
3442}
3443
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003444static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003445 const char *mac, int mtu)
3446{
3447 char ritr_pl[MLXSW_REG_RITR_LEN];
3448 int err;
3449
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003450 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003451 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3452 if (err)
3453 return err;
3454
3455 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3456 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3457 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3458 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3459}
3460
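/* Update a RIF after the MAC address or MTU of its netdev changed: the
 * old FDB entry is removed, the RITR record is re-written with the new
 * attributes and a new FDB entry is installed.
 */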
3461int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3462{
3463 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003464 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003465 int err;
3466
3467 mlxsw_sp = mlxsw_sp_lower_get(dev);
3468 if (!mlxsw_sp)
3469 return 0;
3470
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003471 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3472 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003473 return 0;
3474
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003475 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003476 if (err)
3477 return err;
3478
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003479 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
3480 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003481 if (err)
3482 goto err_rif_edit;
3483
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003484 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003485 if (err)
3486 goto err_rif_fdb_op;
3487
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003488 ether_addr_copy(rif->addr, dev->dev_addr);
3489 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003490
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003491 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003492
3493 return 0;
3494
3495err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003496 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003497err_rif_edit:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003498 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003499 return err;
3500}
3501
Ido Schimmelb1e45522017-04-30 19:47:14 +03003502static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3503 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003504{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003505 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003506
Ido Schimmelb1e45522017-04-30 19:47:14 +03003507 /* If netdev is already associated with a RIF, then we need to
3508 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01003509 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03003510 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3511 if (rif)
3512 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003513
Ido Schimmelb1e45522017-04-30 19:47:14 +03003514 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003515}
3516
Ido Schimmelb1e45522017-04-30 19:47:14 +03003517static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3518 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003519{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003520 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003521
Ido Schimmelb1e45522017-04-30 19:47:14 +03003522 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3523 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003524 return;
Ido Schimmelb1e45522017-04-30 19:47:14 +03003525 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003526}
3527
Ido Schimmelb1e45522017-04-30 19:47:14 +03003528int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3529 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003530{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003531 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3532 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003533
Ido Schimmelb1e45522017-04-30 19:47:14 +03003534 if (!mlxsw_sp)
3535 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003536
Ido Schimmelb1e45522017-04-30 19:47:14 +03003537 switch (event) {
3538 case NETDEV_PRECHANGEUPPER:
3539 return 0;
3540 case NETDEV_CHANGEUPPER:
3541 if (info->linking)
3542 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3543 else
3544 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3545 break;
3546 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003547
Ido Schimmelb1e45522017-04-30 19:47:14 +03003548 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003549}
3550
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003551static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3552{
3553 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
3554
3555 /* Flush pending FIB notifications and then flush the device's
3556 * table before requesting another dump. The FIB notification
3557 * block is unregistered, so no need to take RTNL.
3558 */
3559 mlxsw_core_flush_owq();
3560 mlxsw_sp_router_fib_flush(mlxsw_sp);
3561}
3562
Ido Schimmel4724ba562017-03-10 08:53:39 +01003563static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3564{
3565 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3566 u64 max_rifs;
3567 int err;
3568
3569 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3570 return -EIO;
3571
3572 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003573 mlxsw_sp->router->rifs = kcalloc(max_rifs,
3574 sizeof(struct mlxsw_sp_rif *),
3575 GFP_KERNEL);
3576 if (!mlxsw_sp->router->rifs)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003577 return -ENOMEM;
3578
3579 mlxsw_reg_rgcr_pack(rgcr_pl, true);
3580 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3581 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3582 if (err)
3583 goto err_rgcr_fail;
3584
3585 return 0;
3586
3587err_rgcr_fail:
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003588 kfree(mlxsw_sp->router->rifs);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003589 return err;
3590}
3591
3592static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3593{
3594 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3595 int i;
3596
3597 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3598 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3599
3600 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003601 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003602
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003603 kfree(mlxsw_sp->router->rifs);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003604}
3605
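/* Router initialization order: router struct, RIF array and global router
 * enable (RGCR), nexthop and nexthop group hash tables, LPM trees,
 * virtual routers, neighbour handling and finally the FIB notifier, whose
 * registration also triggers a dump of the existing FIB tables.
 */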
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003606int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3607{
Ido Schimmel9011b672017-05-16 19:38:25 +02003608 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003609 int err;
3610
Ido Schimmel9011b672017-05-16 19:38:25 +02003611 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
3612 if (!router)
3613 return -ENOMEM;
3614 mlxsw_sp->router = router;
3615 router->mlxsw_sp = mlxsw_sp;
3616
3617 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003618 err = __mlxsw_sp_router_init(mlxsw_sp);
3619 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02003620 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003621
Ido Schimmel9011b672017-05-16 19:38:25 +02003622 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003623 &mlxsw_sp_nexthop_ht_params);
3624 if (err)
3625 goto err_nexthop_ht_init;
3626
Ido Schimmel9011b672017-05-16 19:38:25 +02003627 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003628 &mlxsw_sp_nexthop_group_ht_params);
3629 if (err)
3630 goto err_nexthop_group_ht_init;
3631
Ido Schimmel8494ab02017-03-24 08:02:47 +01003632 err = mlxsw_sp_lpm_init(mlxsw_sp);
3633 if (err)
3634 goto err_lpm_init;
3635
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003636 err = mlxsw_sp_vrs_init(mlxsw_sp);
3637 if (err)
3638 goto err_vrs_init;
3639
Ido Schimmel8c9583a2016-10-27 15:12:57 +02003640 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003641 if (err)
3642 goto err_neigh_init;
3643
3644 mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003645 err = register_fib_notifier(&mlxsw_sp->fib_nb,
3646 mlxsw_sp_router_fib_dump_flush);
3647 if (err)
3648 goto err_register_fib_notifier;
3649
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003650 return 0;
3651
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003652err_register_fib_notifier:
3653 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003654err_neigh_init:
3655 mlxsw_sp_vrs_fini(mlxsw_sp);
3656err_vrs_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01003657 mlxsw_sp_lpm_fini(mlxsw_sp);
3658err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02003659 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003660err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02003661 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003662err_nexthop_ht_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003663 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02003664err_router_init:
3665 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003666 return err;
3667}
3668
3669void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3670{
3671 unregister_fib_notifier(&mlxsw_sp->fib_nb);
3672 mlxsw_sp_neigh_fini(mlxsw_sp);
3673 mlxsw_sp_vrs_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01003674 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02003675 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
3676 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003677 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02003678 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003679}