blob: df4051f5a44299397197dd291a7947deca1f4069 [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020039#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020042#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010043#include <linux/inetdevice.h>
Ido Schimmel9db032b2017-03-16 09:08:17 +010044#include <linux/netdevice.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020045#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020046#include <net/neighbour.h>
47#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020048#include <net/ip_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010049#include <net/fib_rules.h>
Ido Schimmel57837882017-03-16 09:08:16 +010050#include <net/l3mdev.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020051
52#include "spectrum.h"
53#include "core.h"
54#include "reg.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020055#include "spectrum_cnt.h"
56#include "spectrum_dpipe.h"
57#include "spectrum_router.h"
Ido Schimmel464dce12016-07-02 11:00:15 +020058
Ido Schimmel9011b672017-05-16 19:38:25 +020059struct mlxsw_sp_vr;
60struct mlxsw_sp_lpm_tree;
61
/* Per-ASIC router state, allocated once per mlxsw_sp instance. */
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;		/* router interfaces, indexed by RIF index */
	struct mlxsw_sp_vr *vrs;		/* virtual router table */
	struct rhashtable neigh_ht;		/* mlxsw_sp_neigh_entry, keyed by neighbour */
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;	/* managed LPM trees */
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;		/* periodic neighbour activity update */
		unsigned long interval; /* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;	/* neigh entries used by nexthops */
	bool aborted;	/* NOTE(review): presumably set when FIB offload is
			 * abandoned after an error — confirm against users
			 */
	struct notifier_block fib_nb;		/* FIB notifier registration */
};
83
/* Router interface (RIF) — the L3 representation of a netdev. */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops egressing via this RIF */
	struct list_head neigh_list;	/* neigh entries resolved on this RIF */
	struct net_device *dev;		/* backing kernel netdev */
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;			/* hardware RIF index */
	u16 vr_id;			/* virtual router this RIF belongs to */
	unsigned int counter_ingress;	/* meaningful iff counter_ingress_valid */
	bool counter_ingress_valid;
	unsigned int counter_egress;	/* meaningful iff counter_egress_valid */
	bool counter_egress_valid;
};
98
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020099static unsigned int *
100mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
101 enum mlxsw_sp_rif_counter_dir dir)
102{
103 switch (dir) {
104 case MLXSW_SP_RIF_COUNTER_EGRESS:
105 return &rif->counter_egress;
106 case MLXSW_SP_RIF_COUNTER_INGRESS:
107 return &rif->counter_ingress;
108 }
109 return NULL;
110}
111
112static bool
113mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
114 enum mlxsw_sp_rif_counter_dir dir)
115{
116 switch (dir) {
117 case MLXSW_SP_RIF_COUNTER_EGRESS:
118 return rif->counter_egress_valid;
119 case MLXSW_SP_RIF_COUNTER_INGRESS:
120 return rif->counter_ingress_valid;
121 }
122 return false;
123}
124
125static void
126mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
127 enum mlxsw_sp_rif_counter_dir dir,
128 bool valid)
129{
130 switch (dir) {
131 case MLXSW_SP_RIF_COUNTER_EGRESS:
132 rif->counter_egress_valid = valid;
133 break;
134 case MLXSW_SP_RIF_COUNTER_INGRESS:
135 rif->counter_ingress_valid = valid;
136 break;
137 }
138}
139
140static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
141 unsigned int counter_index, bool enable,
142 enum mlxsw_sp_rif_counter_dir dir)
143{
144 char ritr_pl[MLXSW_REG_RITR_LEN];
145 bool is_egress = false;
146 int err;
147
148 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
149 is_egress = true;
150 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
151 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
152 if (err)
153 return err;
154
155 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
156 is_egress);
157 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
158}
159
160int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
161 struct mlxsw_sp_rif *rif,
162 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
163{
164 char ricnt_pl[MLXSW_REG_RICNT_LEN];
165 unsigned int *p_counter_index;
166 bool valid;
167 int err;
168
169 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
170 if (!valid)
171 return -EINVAL;
172
173 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
174 if (!p_counter_index)
175 return -EINVAL;
176 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
177 MLXSW_REG_RICNT_OPCODE_NOP);
178 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
179 if (err)
180 return err;
181 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
182 return 0;
183}
184
/* Reset the hardware counter at @counter_index to zero via RICNT. */
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
194
/* Allocate a counter from the RIF sub-pool for @rif in direction @dir,
 * clear it and bind it to the RIF in hardware. On success the counter
 * is marked valid and can be read with mlxsw_sp_rif_counter_value_get().
 * Returns 0 on success or a negative errno; on failure the counter is
 * returned to the pool.
 */
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	/* Start counting from zero; the pooled index may carry a stale
	 * value from a previous user.
	 */
	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
227
/* Unbind the RIF counter for direction @dir from hardware, return the
 * index to the counter pool and mark it invalid. Counterpart of
 * mlxsw_sp_rif_counter_alloc(); a WARN fires if no index exists.
 */
void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
243
Ido Schimmel4724ba562017-03-10 08:53:39 +0100244static struct mlxsw_sp_rif *
245mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
246 const struct net_device *dev);
247
Ido Schimmel9011b672017-05-16 19:38:25 +0200248#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
249
250struct mlxsw_sp_prefix_usage {
251 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
252};
253
Jiri Pirko53342022016-07-04 08:23:08 +0200254#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
255 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
256
/* Return true if every prefix length set in @prefix_usage1 is also set
 * in @prefix_usage2, i.e. usage1 is a subset of usage2.
 */
static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}
269
270static bool
Jiri Pirko53342022016-07-04 08:23:08 +0200271mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
272 struct mlxsw_sp_prefix_usage *prefix_usage2)
273{
274 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
275}
276
Jiri Pirko6b75c482016-07-04 08:23:09 +0200277static bool
278mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
279{
280 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
281
282 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
283}
284
/* Copy @prefix_usage2 into @prefix_usage1. */
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
291
/* Mark prefix length @prefix_len as used. */
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}
298
/* Mark prefix length @prefix_len as no longer used. */
static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
305
306struct mlxsw_sp_fib_key {
307 unsigned char addr[sizeof(struct in6_addr)];
308 unsigned char prefix_len;
309};
310
Jiri Pirko61c503f2016-07-04 08:23:11 +0200311enum mlxsw_sp_fib_entry_type {
312 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
313 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
314 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
315};
316
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200317struct mlxsw_sp_nexthop_group;
Ido Schimmel9011b672017-05-16 19:38:25 +0200318struct mlxsw_sp_fib;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200319
/* One prefix (addr/len) in a FIB table; holds the entries sharing that
 * prefix.
 */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* mlxsw_sp_fib_entry::list */
	struct list_head list;		/* member of fib->node_list */
	struct rhash_head ht_node;	/* member of fib->ht */
	struct mlxsw_sp_fib *fib;	/* owning table */
	struct mlxsw_sp_fib_key key;
};

/* Attributes taken from the kernel FIB entry used to distinguish
 * entries within a node.
 */
struct mlxsw_sp_fib_entry_params {
	u32 tb_id;	/* kernel routing table id */
	u32 prio;
	u8 tos;
	u8 type;
};

/* One route within a FIB node. */
struct mlxsw_sp_fib_entry {
	struct list_head list;		/* member of fib_node->entry_list */
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;	/* entry currently programmed to hardware */
};
344
enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

/* One hardware LPM tree; shared between tables whose prefix usage is
 * identical (see mlxsw_sp_lpm_tree_get()).
 */
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;		/* 0 means the slot is free */
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

/* A FIB table — one per (virtual router, protocol) pair. */
struct mlxsw_sp_fib {
	struct rhashtable ht;		/* fib nodes keyed by mlxsw_sp_fib_key */
	struct list_head node_list;	/* all fib nodes in this table */
	struct mlxsw_sp_vr *vr;		/* owning virtual router */
	struct mlxsw_sp_lpm_tree *lpm_tree;	/* tree currently bound */
	/* how many routes use each prefix length */
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};
366
/* Virtual router — maps a kernel routing table onto a hardware VR. */
struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;	/* RIFs bound to this VR */
	struct mlxsw_sp_fib *fib4;	/* IPv4 table; non-NULL iff VR in use */
};
373
Ido Schimmel9aecce12017-02-09 10:28:42 +0100374static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200375
Ido Schimmel76610eb2017-03-10 08:53:41 +0100376static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
377 enum mlxsw_sp_l3proto proto)
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200378{
379 struct mlxsw_sp_fib *fib;
380 int err;
381
382 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
383 if (!fib)
384 return ERR_PTR(-ENOMEM);
385 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
386 if (err)
387 goto err_rhashtable_init;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100388 INIT_LIST_HEAD(&fib->node_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100389 fib->proto = proto;
390 fib->vr = vr;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200391 return fib;
392
393err_rhashtable_init:
394 kfree(fib);
395 return ERR_PTR(err);
396}
397
/* Free a FIB table. All nodes must already be removed and the LPM tree
 * unbound; both are asserted with WARN_ON.
 */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
405
Jiri Pirko53342022016-07-04 08:23:08 +0200406static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100407mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200408{
409 static struct mlxsw_sp_lpm_tree *lpm_tree;
410 int i;
411
Ido Schimmel9011b672017-05-16 19:38:25 +0200412 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
413 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100414 if (lpm_tree->ref_count == 0)
415 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200416 }
417 return NULL;
418}
419
/* Allocate (activate) LPM tree @lpm_tree in hardware via RALTA. */
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
430
/* Release (deactivate) LPM tree @lpm_tree in hardware via RALTA. */
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
441
/* Program the tree structure (RALST register) from the set of used
 * prefix lengths: the longest used prefix becomes the root bin and the
 * remaining non-zero prefixes are chained in increasing order. The
 * packing order of the bins is significant; do not reorder the loops.
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	/* After this loop root_bin is the highest set prefix length. */
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
465
466static struct mlxsw_sp_lpm_tree *
467mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
468 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100469 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200470{
471 struct mlxsw_sp_lpm_tree *lpm_tree;
472 int err;
473
Ido Schimmel382dbb42017-03-10 08:53:40 +0100474 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
Jiri Pirko53342022016-07-04 08:23:08 +0200475 if (!lpm_tree)
476 return ERR_PTR(-EBUSY);
477 lpm_tree->proto = proto;
478 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
479 if (err)
480 return ERR_PTR(err);
481
482 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
483 lpm_tree);
484 if (err)
485 goto err_left_struct_set;
Jiri Pirko2083d362016-10-25 11:25:56 +0200486 memcpy(&lpm_tree->prefix_usage, prefix_usage,
487 sizeof(lpm_tree->prefix_usage));
Jiri Pirko53342022016-07-04 08:23:08 +0200488 return lpm_tree;
489
490err_left_struct_set:
491 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
492 return ERR_PTR(err);
493}
494
/* Destroy an LPM tree; currently only releases it in hardware. */
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
500
501static struct mlxsw_sp_lpm_tree *
502mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
503 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100504 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200505{
506 struct mlxsw_sp_lpm_tree *lpm_tree;
507 int i;
508
Ido Schimmel9011b672017-05-16 19:38:25 +0200509 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
510 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Jiri Pirko8b99bec2016-10-25 11:25:57 +0200511 if (lpm_tree->ref_count != 0 &&
512 lpm_tree->proto == proto &&
Jiri Pirko53342022016-07-04 08:23:08 +0200513 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
514 prefix_usage))
515 goto inc_ref_count;
516 }
517 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100518 proto);
Jiri Pirko53342022016-07-04 08:23:08 +0200519 if (IS_ERR(lpm_tree))
520 return lpm_tree;
521
522inc_ref_count:
523 lpm_tree->ref_count++;
524 return lpm_tree;
525}
526
527static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
528 struct mlxsw_sp_lpm_tree *lpm_tree)
529{
530 if (--lpm_tree->ref_count == 0)
531 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
532 return 0;
533}
534
Ido Schimmel8494ab02017-03-24 08:02:47 +0100535#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
536
537static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200538{
539 struct mlxsw_sp_lpm_tree *lpm_tree;
Ido Schimmel8494ab02017-03-24 08:02:47 +0100540 u64 max_trees;
Jiri Pirko53342022016-07-04 08:23:08 +0200541 int i;
542
Ido Schimmel8494ab02017-03-24 08:02:47 +0100543 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
544 return -EIO;
545
546 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
Ido Schimmel9011b672017-05-16 19:38:25 +0200547 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
548 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
Ido Schimmel8494ab02017-03-24 08:02:47 +0100549 sizeof(struct mlxsw_sp_lpm_tree),
550 GFP_KERNEL);
Ido Schimmel9011b672017-05-16 19:38:25 +0200551 if (!mlxsw_sp->router->lpm.trees)
Ido Schimmel8494ab02017-03-24 08:02:47 +0100552 return -ENOMEM;
553
Ido Schimmel9011b672017-05-16 19:38:25 +0200554 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
555 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Jiri Pirko53342022016-07-04 08:23:08 +0200556 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
557 }
Ido Schimmel8494ab02017-03-24 08:02:47 +0100558
559 return 0;
560}
561
/* Free the LPM tree table allocated by mlxsw_sp_lpm_init(). */
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}
566
Ido Schimmel76610eb2017-03-10 08:53:41 +0100567static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
568{
569 return !!vr->fib4;
570}
571
Jiri Pirko6b75c482016-07-04 08:23:09 +0200572static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
573{
574 struct mlxsw_sp_vr *vr;
575 int i;
576
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200577 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200578 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100579 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200580 return vr;
581 }
582 return NULL;
583}
584
/* Bind the FIB's virtual router to the FIB's LPM tree via RALTB. */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
595
/* Detach the FIB's virtual router from its LPM tree by rebinding it to
 * the reserved default tree.
 */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
606
607static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
608{
609 /* For our purpose, squash main and local table into one */
610 if (tb_id == RT_TABLE_LOCAL)
611 tb_id = RT_TABLE_MAIN;
612 return tb_id;
613}
614
615static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100616 u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200617{
618 struct mlxsw_sp_vr *vr;
619 int i;
620
621 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200622
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200623 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200624 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100625 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200626 return vr;
627 }
628 return NULL;
629}
630
Ido Schimmel76610eb2017-03-10 08:53:41 +0100631static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
632 enum mlxsw_sp_l3proto proto)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200633{
Ido Schimmel76610eb2017-03-10 08:53:41 +0100634 switch (proto) {
635 case MLXSW_SP_L3_PROTO_IPV4:
636 return vr->fib4;
637 case MLXSW_SP_L3_PROTO_IPV6:
638 BUG_ON(1);
639 }
640 return NULL;
641}
642
643static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
644 u32 tb_id)
645{
Jiri Pirko6b75c482016-07-04 08:23:09 +0200646 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200647
648 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
649 if (!vr)
650 return ERR_PTR(-EBUSY);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100651 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
652 if (IS_ERR(vr->fib4))
653 return ERR_CAST(vr->fib4);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200654 vr->tb_id = tb_id;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200655 return vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200656}
657
/* Destroy a VR's FIB table and mark the VR slot unused (fib4 == NULL
 * is the "free" marker, see mlxsw_sp_vr_is_used()).
 */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
663
/* Make sure the LPM tree bound to @fib can serve @req_prefix_usage.
 * If the current tree matches exactly, nothing is done. Otherwise a
 * matching tree is acquired and the VR is rebound to it before the old
 * tree's reference is dropped, so lookups keep working throughout the
 * transition. If no new tree can be obtained but the requirement is a
 * subset of the current tree's usage, the current tree is kept.
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	/* Roll back: restore the old tree pointer and drop the
	 * reference taken on the never-bound new tree.
	 */
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
703
Ido Schimmel76610eb2017-03-10 08:53:41 +0100704static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200705{
706 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200707
708 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100709 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
710 if (!vr)
711 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200712 return vr;
713}
714
Ido Schimmel76610eb2017-03-10 08:53:41 +0100715static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200716{
Ido Schimmel69132292017-03-10 08:53:42 +0100717 if (!vr->rif_count && list_empty(&vr->fib4->node_list))
Ido Schimmel76610eb2017-03-10 08:53:41 +0100718 mlxsw_sp_vr_destroy(vr);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200719}
720
Nogah Frankel9497c042016-09-20 11:16:54 +0200721static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200722{
723 struct mlxsw_sp_vr *vr;
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200724 u64 max_vrs;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200725 int i;
726
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200727 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
Nogah Frankel9497c042016-09-20 11:16:54 +0200728 return -EIO;
729
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200730 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
Ido Schimmel9011b672017-05-16 19:38:25 +0200731 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
732 GFP_KERNEL);
733 if (!mlxsw_sp->router->vrs)
Nogah Frankel9497c042016-09-20 11:16:54 +0200734 return -ENOMEM;
735
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200736 for (i = 0; i < max_vrs; i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200737 vr = &mlxsw_sp->router->vrs[i];
Jiri Pirko6b75c482016-07-04 08:23:09 +0200738 vr->id = i;
739 }
Nogah Frankel9497c042016-09-20 11:16:54 +0200740
741 return 0;
742}
743
Ido Schimmelac571de2016-11-14 11:26:32 +0100744static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
745
/* Tear down the virtual-router table. Must only run once no new FIB
 * notifications can arrive for mlxsw netdevs.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
759
struct mlxsw_sp_neigh_key {
	struct neighbour *n;	/* kernel neighbour this entry mirrors */
};

/* Driver-side mirror of a kernel neighbour entry. */
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* member of rif->neigh_list */
	struct rhash_head ht_node;	/* member of router->neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif;	/* index of the RIF the neighbour was created on */
	bool connected;	/* NOTE(review): presumably true while the entry is
			 * programmed as reachable — confirm against users
			 */
	unsigned char ha[ETH_ALEN];	/* cached hardware address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

/* neigh_ht hashes entries by the kernel neighbour pointer. */
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
782
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100783static struct mlxsw_sp_neigh_entry *
784mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
785 u16 rif)
786{
787 struct mlxsw_sp_neigh_entry *neigh_entry;
788
789 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
790 if (!neigh_entry)
791 return NULL;
792
793 neigh_entry->key.n = n;
794 neigh_entry->rif = rif;
795 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
796
797 return neigh_entry;
798}
799
/* Free a neighbour entry; caller must have unlinked it first. */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
804
/* Insert @neigh_entry into the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
813
/* Remove the entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
822
/* Create a neighbour entry for 'n': allocate it, insert it into the
 * hash table and link it on the RIF matching n->dev.
 *
 * Returns the entry or an ERR_PTR(): -EINVAL when no RIF exists for the
 * netdev, -ENOMEM on allocation failure, or the rhashtable insertion
 * error.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
850
/* Undo mlxsw_sp_neigh_entry_create(): unlink from the RIF list, remove
 * from the hash table and free the entry.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
859
860static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +0100861mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200862{
Jiri Pirko33b13412016-11-10 12:31:04 +0100863 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200864
Jiri Pirko33b13412016-11-10 12:31:04 +0100865 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +0200866 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200867 &key, mlxsw_sp_neigh_ht_params);
868}
869
/* Seed the neighbour activity polling interval from the default ARP
 * table's DELAY_PROBE_TIME, converted to milliseconds.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
877
/* Handle one IPv4 entry of a RAUHTD activity dump: map the reported RIF
 * and destination IP to the kernel neighbour and send it an event so
 * the kernel treats it as active.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	/* The device reported a RIF we have no record of. */
	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);	/* drop the ref taken by neigh_lookup() */
}
908
909static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
910 char *rauhtd_pl,
911 int rec_index)
912{
913 u8 num_entries;
914 int i;
915
916 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
917 rec_index);
918 /* Hardware starts counting at 0, so add 1. */
919 num_entries++;
920
921 /* Each record consists of several neighbour entries. */
922 for (i = 0; i < num_entries; i++) {
923 int ent_index;
924
925 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
926 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
927 ent_index);
928 }
929
930}
931
/* Dispatch one RAUHTD dump record by type. Only IPv4 records are
 * expected here; an IPv6 record triggers a one-time warning.
 */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
945
/* Return true when the last RAUHTD query may not have drained all the
 * activity records, i.e. the response is completely full: all records
 * are used and the last one is either IPv6 or an IPv4 record with the
 * maximum number of entries. In that case the caller should query
 * again.
 */
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	/* Hardware reports the entry count zero-based, hence the increment. */
	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}
965
Yotam Gigib2157142016-07-05 11:27:51 +0200966static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
Yotam Gigic723c7352016-07-05 11:27:43 +0200967{
Yotam Gigic723c7352016-07-05 11:27:43 +0200968 char *rauhtd_pl;
969 u8 num_rec;
970 int i, err;
971
972 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
973 if (!rauhtd_pl)
Yotam Gigib2157142016-07-05 11:27:51 +0200974 return -ENOMEM;
Yotam Gigic723c7352016-07-05 11:27:43 +0200975
976 /* Make sure the neighbour's netdev isn't removed in the
977 * process.
978 */
979 rtnl_lock();
980 do {
981 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
982 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
983 rauhtd_pl);
984 if (err) {
985 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
986 break;
987 }
988 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
989 for (i = 0; i < num_rec; i++)
990 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
991 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100992 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +0200993 rtnl_unlock();
994
995 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +0200996 return err;
997}
998
/* Send a neighbour event for every neighbour backing a nexthop, so the
 * kernel considers them active regardless of CPU-visible traffic.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
1013
/* Re-arm the periodic neighbour activity work using the current polling
 * interval (stored in milliseconds).
 */
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}
1022
/* Periodic work: dump neighbour activity from the device, keep nexthop
 * neighbours alive and reschedule itself.
 */
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
1038
/* Periodic work: trigger resolution of nexthop neighbours that are not
 * yet connected, then reschedule itself.
 */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
1064
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001065static void
1066mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1067 struct mlxsw_sp_neigh_entry *neigh_entry,
1068 bool removing);
1069
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001070static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001071{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001072 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1073 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1074}
1075
/* Write or delete an IPv4 neighbour entry in the device using the RAUHT
 * register, keyed by the entry's RIF and the neighbour's primary IP.
 */
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
1089
/* Sync the entry's connected state to the device. Removing an entry
 * that was never connected is a no-op. Only ARP (IPv4) neighbours are
 * supported here; any other table triggers a one-time warning.
 */
static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl == &arp_tbl)
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	else
		WARN_ON_ONCE(1);
}
1104
/* Deferred-work context for a NETEVENT_NEIGH_UPDATE notification. */
struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;	/* reference held (neigh_clone) until the
				 * work item runs
				 */
};
1110
/* Process a neighbour update in process context: snapshot the
 * neighbour's state under its lock, create/update the driver entry,
 * program the device and refresh dependent nexthops. Destroys the
 * entry when it is disconnected and unused. Drops the neighbour
 * reference taken in mlxsw_sp_router_netevent_event() and frees the
 * work item.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* Disconnected and never tracked - nothing to do. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* Drop entries that are disconnected and no longer referenced. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}
1155
/* netevent notifier callback. Runs in atomic context, so real work is
 * either a simple interval update (DELAY_PROBE_TIME change) or is
 * deferred to a work item (neighbour update). Returns NOTIFY_DONE, or
 * NOTIFY_BAD when the work item cannot be allocated.
 */
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		/* Only IPv4 (ARP) neighbours are handled. */
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}
1219
/* Initialize neighbour tracking: the hash table, the polling interval
 * and the two delayed works (activity update, unresolved nexthop
 * probing), both kicked off immediately. Returns 0 or the
 * rhashtable_init() error.
 */
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}
1243
/* Tear down neighbour tracking: stop both delayed works and destroy the
 * neighbour hash table.
 */
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
1250
/* Delete all of the RIF's neighbour entries from the device with a
 * single RAUHT DELETE_ALL write. Returns the register write result.
 */
static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_rif *rif)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
			     rif->rif_index, rif->addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
1260
/* A RIF is going away: flush its neighbours from the device and destroy
 * every driver entry linked on it.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
1271
/* Hash table key for a nexthop: the kernel fib_nh it mirrors. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};
1275
/* One nexthop, member of a mlxsw_sp_nexthop_group and hashed by the
 * fib_nh it mirrors.
 */
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;	/* member of the RIF's nexthop_list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;	/* node in router->nexthop_ht */
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *rif;	/* linked RIF, NULL when not linked
					 * (see mlxsw_sp_nexthop_rif_init)
					 */
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};
1296
/* Hash table key for a nexthop group: the fib_info it was created from. */
struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};
1300
/* A group of nexthops, keyed by fib_info. The trailing nexthops[]
 * array holds 'count' entries (old-style zero-length array).
 */
struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;	/* node in router->nexthop_group_ht */
	struct list_head fib_list; /* list of fib entries that use this group */
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;	/* base of the group's KVD linear adjacency block */
	u16 ecmp_size;	/* number of offloaded adjacency entries */
	u16 count;	/* number of nexthops in nexthops[] */
	struct mlxsw_sp_nexthop nexthops[0];
/* Shorthand for the RIF of the group's first nexthop. */
#define nh_rif nexthops[0].rif
};
1313
/* rhashtable layout for router->nexthop_group_ht. */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};
1319
/* Insert the group into the router's nexthop group hash table. */
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}
1327
/* Remove the group from the router's nexthop group hash table. */
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}
1335
/* Look up a nexthop group by its fib_info key; NULL when absent. */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}
1343
/* rhashtable layout for router->nexthop_ht. */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};
1349
/* Insert the nexthop into the router's nexthop hash table. */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
1356
/* Remove the nexthop from the router's nexthop hash table. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}
1363
/* Look up a nexthop by its fib_nh key; NULL when absent. */
static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
1371
/* Tell the device (RALEU register) to re-point the given FIB's virtual
 * router from the old (adj_index, ecmp_size) adjacency block to the new
 * one. Returns the register write result.
 */
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
1386
/* Issue a RALEU mass-update for each FIB referenced by the group's fib
 * entries, skipping consecutive entries that share a FIB. Returns the
 * first error, if any.
 */
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		/* Same FIB as the previous entry - already updated. */
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}
1409
/* Program a single adjacency entry (RATR register) at 'adj_index' with
 * the nexthop neighbour's RIF and MAC address.
 */
static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
1421
/* Write the adjacency entries of all offloadable nexthops in the group,
 * assigning them consecutive indexes starting at the group's base.
 * When 'reallocate' is set every offloadable nexthop is rewritten (the
 * adjacency block is fresh); otherwise only those flagged for update.
 * Nexthops that should not be offloaded are marked as not offloaded.
 * Returns the first RATR write error, if any.
 */
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}
1452
1453static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1454 struct mlxsw_sp_fib_entry *fib_entry);
1455
/* Re-program every FIB entry that uses this nexthop group, e.g. to pick
 * up a new adjacency index or a switch to/from trapping. Stops at the
 * first error.
 */
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}
1470
/* Recompute and program the group's adjacency entries after nexthop
 * state changed. When the set of offloaded nexthops changed, a new KVD
 * linear block is allocated, FIB entries are mass-updated to it and the
 * old block is freed; otherwise only the MACs of existing entries are
 * refreshed. On any failure the group falls back to trapping packets
 * to the CPU (set_trap).
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	/* Gateway-less groups have no adjacency entries to maintain. */
	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	/* Free the adjacency block that was in use before trapping. */
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
1573
1574static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1575 bool removing)
1576{
1577 if (!removing && !nh->should_offload)
1578 nh->should_offload = 1;
1579 else if (removing && nh->offloaded)
1580 nh->should_offload = 0;
1581 nh->update = 1;
1582}
1583
/* Propagate a neighbour state change to every nexthop that resolves through
 * it: update each nexthop's offload intent and refresh its group so the
 * device adjacency table reflects the new state.
 */
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1597
Ido Schimmel9665b742017-02-08 11:16:42 +01001598static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001599 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001600{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001601 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001602 return;
1603
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001604 nh->rif = rif;
1605 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001606}
1607
1608static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1609{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001610 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001611 return;
1612
1613 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001614 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01001615}
1616
/* Resolve the nexthop's gateway to a neighbour entry and attach to it.
 * Looks up (or creates) the kernel neighbour for the gateway IP, then the
 * driver-side neigh entry, links the nexthop on it and seeds the initial
 * offload state from the neighbour's NUD state. Returns 0 on success or if
 * the group has no gateway / the nexthop is already attached.
 */
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct fib_nh *fib_nh = nh->key.fib_nh;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	/* Gateway-less (directly connected) groups do not need a neighbour;
	 * a non-NULL neigh_entry means this nexthop is already initialized.
	 */
	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution so the new neighbour becomes valid. */
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	/* Snapshot NUD state under the neighbour lock so state and dead
	 * flag are read consistently.
	 */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}
1671
/* Detach a nexthop from its neighbour entry, releasing the neighbour
 * reference taken in mlxsw_sp_nexthop_neigh_init(). Destroys the driver
 * neigh entry when this was its last user and it is no longer connected.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	/* Nothing to do for gateway-less or never-attached nexthops. */
	if (!neigh_entry)
		return;
	/* Keep the kernel neighbour pointer; neigh_entry may be destroyed
	 * below, but we still must neigh_release() afterwards.
	 */
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001697
Ido Schimmela8c97012017-02-08 11:16:35 +01001698static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1699 struct mlxsw_sp_nexthop_group *nh_grp,
1700 struct mlxsw_sp_nexthop *nh,
1701 struct fib_nh *fib_nh)
1702{
1703 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001704 struct in_device *in_dev;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001705 struct mlxsw_sp_rif *rif;
Ido Schimmela8c97012017-02-08 11:16:35 +01001706 int err;
1707
1708 nh->nh_grp = nh_grp;
1709 nh->key.fib_nh = fib_nh;
1710 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1711 if (err)
1712 return err;
1713
Ido Schimmel97989ee2017-03-10 08:53:38 +01001714 if (!dev)
1715 return 0;
1716
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001717 in_dev = __in_dev_get_rtnl(dev);
1718 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1719 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1720 return 0;
1721
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001722 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1723 if (!rif)
Ido Schimmela8c97012017-02-08 11:16:35 +01001724 return 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001725 mlxsw_sp_nexthop_rif_init(nh, rif);
Ido Schimmela8c97012017-02-08 11:16:35 +01001726
1727 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1728 if (err)
1729 goto err_nexthop_neigh_init;
1730
1731 return 0;
1732
1733err_nexthop_neigh_init:
1734 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1735 return err;
1736}
1737
/* Tear down a nexthop in the reverse order of mlxsw_sp_nexthop_init():
 * neighbour first, then RIF binding, then the hashtable entry.
 */
static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
1745
/* FIB notifier handler for single-nexthop add/delete events. Re-binds or
 * unbinds the matching driver nexthop and refreshes its group. Ignored
 * entirely once the router is in aborted state.
 */
static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	/* Events on devices without a RIF are irrelevant to the device. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		/* NOTE(review): mlxsw_sp_nexthop_neigh_init() can fail but
		 * its return value is ignored here - confirm this is a
		 * deliberate best-effort.
		 */
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
1778
/* A RIF is going away: detach every nexthop that egresses through it and
 * refresh the affected groups so their routes fall back to trapping.
 * Uses the _safe iterator because rif_fini() unlinks each node.
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1790
/* Create a driver nexthop group mirroring the kernel fib_info: allocate the
 * group with a trailing array of fi->fib_nhs nexthops, initialize each one,
 * hash the group by fib_info and program it into the device.
 * Returns the group or ERR_PTR(); on failure already-initialized nexthops
 * are unwound in reverse order.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	/* A link-scoped first nexthop means the route is directly connected,
	 * i.e. the group has no gateway to resolve.
	 */
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	/* Unwind only the nexthops that were successfully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
1832
/* Destroy a nexthop group: unhash it, tear down each nexthop, then refresh
 * so the device releases the group's adjacency entries (the refresh after
 * fini must leave adj_index invalid, hence the WARN).
 */
static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
1849
/* Attach a FIB entry to the nexthop group for its fib_info, creating the
 * group on first use. Groups are shared: lookup is keyed by fib_info and
 * the entry is linked on the group's fib_list (which acts as the refcount;
 * see mlxsw_sp_nexthop_group_put()).
 */
static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_key key;
	struct mlxsw_sp_nexthop_group *nh_grp;

	key.fi = fi;
	nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}
1868
1869static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1870 struct mlxsw_sp_fib_entry *fib_entry)
1871{
1872 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1873
1874 list_del(&fib_entry->nexthop_group_node);
1875 if (!list_empty(&nh_grp->fib_list))
1876 return;
1877 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1878}
1879
Ido Schimmel013b20f2017-02-08 11:16:36 +01001880static bool
1881mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1882{
1883 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1884
Ido Schimmel9aecce12017-02-09 10:28:42 +01001885 if (fib_entry->params.tos)
1886 return false;
1887
Ido Schimmel013b20f2017-02-08 11:16:36 +01001888 switch (fib_entry->type) {
1889 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1890 return !!nh_group->adj_index_valid;
1891 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001892 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001893 default:
1894 return false;
1895 }
1896}
1897
/* Mark a FIB entry as offloaded and bump the kernel fib_info offload
 * counter (IPv4 only; IPv6 is not supported here and warns).
 */
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
	}
}
1910
1911static void
1912mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1913{
Ido Schimmel76610eb2017-03-10 08:53:41 +01001914 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01001915 case MLXSW_SP_L3_PROTO_IPV4:
1916 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1917 break;
1918 case MLXSW_SP_L3_PROTO_IPV6:
1919 WARN_ON_ONCE(1);
1920 }
1921
1922 fib_entry->offloaded = false;
1923}
1924
/* Reconcile the entry's offloaded flag with the outcome of a RALUE write:
 * a delete clears it; a successful write sets or clears it according to
 * whether the entry should currently be offloaded.
 */
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		if (!fib_entry->offloaded)
			return;
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
		    !fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
			 fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
1948
/* Program an IPv4 "remote" (gatewayed) route via the RALUE register.
 * Offloadable entries point at the group's adjacency index / ECMP size;
 * otherwise the action is a trap to the kernel.
 */
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	/* The cast assumes mlxsw_sp_l3proto values match the register's
	 * ralxx protocol encoding.
	 */
	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1982
/* Program an IPv4 "local" route via RALUE: forward to the egress RIF when
 * offloadable, otherwise trap to the kernel.
 */
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif_index = 0;

	/* rif may be NULL here; should_offload() returning true implies a
	 * valid nh_rif, so it is only dereferenced in that branch.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
2011
/* Program an IPv4 route with an unconditional ip2me (trap-to-CPU) action,
 * used for broadcast/local routes.
 */
static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
2027
/* Dispatch an IPv4 RALUE operation according to the entry type. */
static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
	}
	/* Unknown entry type. */
	return -EINVAL;
}
2042
/* Apply a RALUE operation to a FIB entry and refresh its offload state.
 * IPv6 is unsupported: it returns -EINVAL without touching the offload
 * state (note the early return bypasses offload_refresh on purpose).
 */
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = -EINVAL;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		return err;
	}
	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
	return err;
}
2059
/* Write (create or update) a FIB entry in the device. */
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
2066
/* Remove a FIB entry from the device. */
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
2073
/* Translate the kernel route type into the driver's FIB entry type:
 * host-directed routes trap, drop-style routes use the cheaper local
 * action, unicast routes are remote (gatewayed) or local (connected)
 * depending on the first nexthop's scope. Unknown types are rejected.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
2105
/* Allocate and initialize a FIB entry under the given FIB node from a
 * kernel FIB notification: determine its type, take a nexthop group
 * reference and record the route parameters used for entry matching.
 * Returns the entry or ERR_PTR().
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_alloc;
	}

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop_group_get;

	/* Parameters used by mlxsw_sp_fib4_entry_lookup() to match this
	 * entry against later notifications.
	 */
	fib_entry->params.prio = fen_info->fi->fib_priority;
	fib_entry->params.tb_id = fen_info->tb_id;
	fib_entry->params.type = fen_info->type;
	fib_entry->params.tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib_entry;

err_nexthop_group_get:
err_fib4_entry_type_set:
	kfree(fib_entry);
err_fib_entry_alloc:
	return ERR_PTR(err);
}
2143
/* Free a FIB entry and drop its nexthop group reference. */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}
2150
2151static struct mlxsw_sp_fib_node *
2152mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2153 const struct fib_entry_notifier_info *fen_info);
2154
/* Find the FIB entry matching a kernel notification by scanning the node's
 * entry list for identical table id, TOS, type and fib_info.
 * NOTE(review): this uses mlxsw_sp_fib4_node_get(), which may create a FIB
 * node (and VR) as a side effect even when no entry exists - verify callers
 * balance that with mlxsw_sp_fib4_node_put().
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node))
		return NULL;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id == fen_info->tb_id &&
		    fib_entry->params.tos == fen_info->tos &&
		    fib_entry->params.type == fen_info->type &&
		    fib_entry->nh_group->key.fi == fen_info->fi) {
			return fib_entry;
		}
	}

	return NULL;
}
2177
/* FIB node hashtable keyed by {address, prefix length}: one node per
 * prefix within a FIB.
 */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
2184
/* Insert a FIB node into the FIB's prefix hashtable. */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
2191
/* Remove a FIB node from the FIB's prefix hashtable. */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
2198
/* Look up a FIB node by prefix. The key is zeroed first so unused tail
 * bytes of the address array hash consistently regardless of addr_len.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
2210
2211static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01002212mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002213 size_t addr_len, unsigned char prefix_len)
2214{
2215 struct mlxsw_sp_fib_node *fib_node;
2216
2217 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2218 if (!fib_node)
2219 return NULL;
2220
2221 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002222 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002223 memcpy(fib_node->key.addr, addr, addr_len);
2224 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002225
2226 return fib_node;
2227}
2228
/* Unlink and free a FIB node; it must have no entries left. */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
2235
2236static bool
2237mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2238 const struct mlxsw_sp_fib_entry *fib_entry)
2239{
2240 return list_first_entry(&fib_node->entry_list,
2241 struct mlxsw_sp_fib_entry, list) == fib_entry;
2242}
2243
2244static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2245{
2246 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002247 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002248
2249 if (fib->prefix_ref_count[prefix_len]++ == 0)
2250 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2251}
2252
2253static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2254{
2255 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002256 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002257
2258 if (--fib->prefix_ref_count[prefix_len] == 0)
2259 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2260}
2261
Ido Schimmel76610eb2017-03-10 08:53:41 +01002262static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2263 struct mlxsw_sp_fib_node *fib_node,
2264 struct mlxsw_sp_fib *fib)
2265{
2266 struct mlxsw_sp_prefix_usage req_prefix_usage;
2267 struct mlxsw_sp_lpm_tree *lpm_tree;
2268 int err;
2269
2270 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2271 if (err)
2272 return err;
2273 fib_node->fib = fib;
2274
2275 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2276 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2277
2278 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2279 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2280 &req_prefix_usage);
2281 if (err)
2282 goto err_tree_check;
2283 } else {
2284 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2285 fib->proto);
2286 if (IS_ERR(lpm_tree))
2287 return PTR_ERR(lpm_tree);
2288 fib->lpm_tree = lpm_tree;
2289 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2290 if (err)
2291 goto err_tree_bind;
2292 }
2293
2294 mlxsw_sp_fib_node_prefix_inc(fib_node);
2295
2296 return 0;
2297
2298err_tree_bind:
2299 fib->lpm_tree = NULL;
2300 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2301err_tree_check:
2302 fib_node->fib = NULL;
2303 mlxsw_sp_fib_node_remove(fib, fib_node);
2304 return err;
2305}
2306
/* Detach a FIB node from its FIB: drop the prefix-length reference,
 * unbind and release the LPM tree if this was the FIB's last prefix
 * (otherwise shrink the tree to the remaining usage), then unhash.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
2326
/* Get (or create) the FIB node for a notified IPv4 prefix. Takes a VR
 * reference via mlxsw_sp_vr_get() and, when no node exists yet, creates
 * and initializes one in the VR's IPv4 FIB.
 * NOTE(review): when an existing node is found, the function returns
 * without mlxsw_sp_vr_put() - presumably the reference is balanced in
 * mlxsw_sp_fib4_node_put() (below this chunk); confirm.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2367
/* Release the node obtained via mlxsw_sp_fib4_node_get(). The node (and
 * its VR reference) is only torn down once no FIB entries are chained to
 * it; otherwise this is a no-op.
 */
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}
2379
/* Find the first entry in the node's list that @params should be placed
 * before (or that it replaces). The entry list appears to be kept sorted
 * by descending tb_id, then descending tos, then descending prio — see
 * mlxsw_sp_fib4_node_list_insert(). Returns NULL if no entry with a
 * matching table and an equal-or-lower position exists.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		/* Skip entries from higher-numbered tables. */
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		/* Past the requested table: no match possible. */
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		/* Within the table, skip entries with a higher TOS. */
		if (fib_entry->params.tos > params->tos)
			continue;
		/* Match on lower TOS, or same TOS with prio at or below
		 * the requested priority.
		 */
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}
2400
Ido Schimmel4283bce2017-02-09 10:28:43 +01002401static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
2402 struct mlxsw_sp_fib_entry *new_entry)
2403{
2404 struct mlxsw_sp_fib_node *fib_node;
2405
2406 if (WARN_ON(!fib_entry))
2407 return -EINVAL;
2408
2409 fib_node = fib_entry->fib_node;
2410 list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
2411 if (fib_entry->params.tb_id != new_entry->params.tb_id ||
2412 fib_entry->params.tos != new_entry->params.tos ||
2413 fib_entry->params.prio != new_entry->params.prio)
2414 break;
2415 }
2416
2417 list_add_tail(&new_entry->list, &fib_entry->list);
2418 return 0;
2419}
2420
/* Link @new_entry into the node's entry list at the position dictated by
 * its (tb_id, tos, prio) parameters. With @append, the entry is placed
 * after its parameter group; with @replace, it is placed immediately
 * before the entry it replaces (removed later by
 * mlxsw_sp_fib4_entry_replace()).
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	/* A replace must have something to replace. */
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		/* No insertion point found: place the entry after the
		 * last entry with a higher-or-equal tb_id, preserving the
		 * descending tb_id order.
		 */
		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}
2457
/* Unlink the entry from its node's entry list. */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
2463
/* Write @fib_entry to the device. Only the first entry of a node is
 * actually offloaded (it has the best parameters); anything else is
 * kept in software only.
 */
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		/* Mark the demoted entry as no longer offloaded. */
		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
2484
/* Remove @fib_entry from the device. Only relevant if the entry is the
 * node's first (i.e. currently offloaded) one; in that case the next
 * entry, if any, is promoted into the hardware in its place.
 */
static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	/* Last entry of the node: simply delete it from the device. */
	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
2505
2506static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002507 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002508 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002509{
2510 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2511 int err;
2512
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002513 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2514 append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002515 if (err)
2516 return err;
2517
2518 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2519 if (err)
2520 goto err_fib4_node_entry_add;
2521
Ido Schimmel9aecce12017-02-09 10:28:42 +01002522 return 0;
2523
2524err_fib4_node_entry_add:
2525 mlxsw_sp_fib4_node_list_remove(fib_entry);
2526 return err;
2527}
2528
/* Reverse of mlxsw_sp_fib4_node_entry_link(): remove the entry from the
 * device (promoting a successor if needed) and unlink it from the list.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}
2538
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002539static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2540 struct mlxsw_sp_fib_entry *fib_entry,
2541 bool replace)
2542{
2543 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2544 struct mlxsw_sp_fib_entry *replaced;
2545
2546 if (!replace)
2547 return;
2548
2549 /* We inserted the new entry before replaced one */
2550 replaced = list_next_entry(fib_entry, list);
2551
2552 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2553 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2554 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2555}
2556
/* Offload an IPv4 route from a FIB_EVENT_ENTRY_{ADD,APPEND,REPLACE}
 * notification. @replace / @append mirror the semantics of the
 * corresponding FIB events.
 *
 * Returns 0 on success — including when the router is in aborted state,
 * where routes are deliberately no longer offloaded — and a negative
 * errno otherwise.
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	/* Get (or create) the node for this prefix; takes a reference
	 * that is dropped on error or when the route is later deleted.
	 */
	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	/* On replace, remove the entry the new one superseded. */
	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}
2599
/* Remove the offloaded route matching @fen_info (FIB_EVENT_ENTRY_DEL).
 * No-op while the router is in aborted state, since nothing is offloaded
 * then. The node reference taken at add time is released here.
 */
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002618
/* Abort mode: configure a minimal LPM tree and, in every VR in use,
 * install a default (/0) IPv4 route whose action is ip2me, so all routed
 * packets are trapped to the CPU instead of being forwarded by the
 * device.
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	/* Allocate the minimal LPM tree... */
	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	/* ...and set its structure. */
	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		/* Bind the VR to the minimal tree. */
		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* Install a prefix-length-0 catch-all route that sends
		 * packets to the CPU (ip2me action).
		 */
		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
2664
/* Destroy every IPv4 entry chained to @fib_node. Each iteration may free
 * the node itself (via mlxsw_sp_fib4_node_put() once the list empties),
 * hence the careful termination check below.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		/* Evaluate before the put below possibly frees fib_node. */
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}
2684
2685static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2686 struct mlxsw_sp_fib_node *fib_node)
2687{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002688 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002689 case MLXSW_SP_L3_PROTO_IPV4:
2690 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2691 break;
2692 case MLXSW_SP_L3_PROTO_IPV6:
2693 WARN_ON_ONCE(1);
2694 break;
2695 }
2696}
2697
/* Flush every FIB node of protocol @proto in virtual router @vr. Flushing
 * a node may free it, so termination is decided before the flush — same
 * pattern as mlxsw_sp_fib4_node_flush().
 */
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		/* Evaluate before fib_node is potentially freed. */
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
2713
/* Flush the IPv4 FIB of every virtual router currently in use. */
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	}
}
2726
/* Enter abort mode: stop offloading routes, flush everything already
 * offloaded, and trap all routed traffic to the CPU. Triggered when an
 * offload operation fails or an unsupported FIB rule is installed.
 * Idempotent — subsequent calls return early.
 */
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
2740
/* Deferred-work wrapper for a FIB notifier event. The notifier runs in
 * atomic context, so the event payload is copied here and processed
 * later from process context (mlxsw_sp_router_fib_event_work()).
 */
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {		/* which member is valid depends on 'event' */
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;	/* FIB_EVENT_* value */
};
2751
/* Process-context handler for a queued FIB event. Runs under RTNL and
 * drops the reference taken on the event payload (fib_info / fib_rule)
 * by mlxsw_sp_router_fib_event() before freeing the work item.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		/* Any offload failure puts the router in abort mode. */
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		/* Policy routing rules other than the defaults (or l3mdev
		 * rules) cannot be offloaded — abort.
		 */
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
2796
/* Called with rcu_read_lock() */
/* FIB notifier callback. Runs in atomic context, so it only copies the
 * event payload (taking references so it stays valid) and defers the
 * real work to mlxsw_sp_router_fib_event_work().
 */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;

	/* Only events from the initial network namespace are handled. */
	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
2844
Ido Schimmel4724ba562017-03-10 08:53:39 +01002845static struct mlxsw_sp_rif *
2846mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2847 const struct net_device *dev)
2848{
2849 int i;
2850
2851 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002852 if (mlxsw_sp->router->rifs[i] &&
2853 mlxsw_sp->router->rifs[i]->dev == dev)
2854 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01002855
2856 return NULL;
2857}
2858
/* Disable the router interface @rif in the device via a RITR
 * read-modify-write: query the current configuration, clear the enable
 * bit and write it back.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2872
/* Synchronize router state after a RIF goes away: disable it in the
 * device, then flush nexthops and neighbour entries that used it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
2880
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002881static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002882 const struct in_device *in_dev,
2883 unsigned long event)
2884{
2885 switch (event) {
2886 case NETDEV_UP:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002887 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002888 return true;
2889 return false;
2890 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002891 if (rif && !in_dev->ifa_list &&
2892 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01002893 return true;
2894 /* It is possible we already removed the RIF ourselves
2895 * if it was assigned to a netdev that is now a bridge
2896 * or LAG slave.
2897 */
2898 return false;
2899 }
2900
2901 return false;
2902}
2903
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002904#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
Ido Schimmel4724ba562017-03-10 08:53:39 +01002905static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2906{
2907 int i;
2908
2909 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002910 if (!mlxsw_sp->router->rifs[i])
Ido Schimmel4724ba562017-03-10 08:53:39 +01002911 return i;
2912
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002913 return MLXSW_SP_INVALID_INDEX_RIF;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002914}
2915
2916static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2917 bool *p_lagged, u16 *p_system_port)
2918{
2919 u8 local_port = mlxsw_sp_vport->local_port;
2920
2921 *p_lagged = mlxsw_sp_vport->lagged;
2922 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2923}
2924
2925static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel69132292017-03-10 08:53:42 +01002926 u16 vr_id, struct net_device *l3_dev,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002927 u16 rif_index, bool create)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002928{
2929 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2930 bool lagged = mlxsw_sp_vport->lagged;
2931 char ritr_pl[MLXSW_REG_RITR_LEN];
2932 u16 system_port;
2933
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002934 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
2935 vr_id, l3_dev->mtu, l3_dev->dev_addr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002936
2937 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2938 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2939 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2940
2941 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2942}
2943
2944static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2945
/* Map a Sub-port RIF index to its router FID (rFIDs start at
 * MLXSW_SP_RFID_BASE).
 */
static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
{
	return MLXSW_SP_RFID_BASE + rif_index;
}
2950
2951static struct mlxsw_sp_fid *
2952mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2953{
2954 struct mlxsw_sp_fid *f;
2955
2956 f = kzalloc(sizeof(*f), GFP_KERNEL);
2957 if (!f)
2958 return NULL;
2959
2960 f->leave = mlxsw_sp_vport_rif_sp_leave;
2961 f->ref_count = 0;
2962 f->dev = l3_dev;
2963 f->fid = fid;
2964
2965 return f;
2966}
2967
2968static struct mlxsw_sp_rif *
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002969mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
Ido Schimmel69132292017-03-10 08:53:42 +01002970 struct mlxsw_sp_fid *f)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002971{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002972 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002973
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002974 rif = kzalloc(sizeof(*rif), GFP_KERNEL);
2975 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002976 return NULL;
2977
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002978 INIT_LIST_HEAD(&rif->nexthop_list);
2979 INIT_LIST_HEAD(&rif->neigh_list);
2980 ether_addr_copy(rif->addr, l3_dev->dev_addr);
2981 rif->mtu = l3_dev->mtu;
2982 rif->vr_id = vr_id;
2983 rif->dev = l3_dev;
2984 rif->rif_index = rif_index;
2985 rif->f = f;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002986
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002987 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002988}
2989
/* Look up a RIF by index; may return NULL if the slot is free. */
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}
2995
/* Accessor: the RIF's hardware index. */
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}
3000
/* Accessor: ifindex of the netdevice backing the RIF. */
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
3005
/* Create a Sub-port RIF for vPort @mlxsw_sp_vport on @l3_dev:
 * pick a free RIF index, take a VR reference (the l3mdev FIB table if
 * set, RT_TABLE_MAIN otherwise), program the RIF in the device, install
 * the FDB entry for the device's MAC, allocate the rFID and RIF
 * structures and optionally an egress counter. Each step is unwound in
 * reverse order on failure. Returns the new RIF or an ERR_PTR().
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *rif;
	u16 fid, rif_index;
	int err;

	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return ERR_PTR(-ERANGE);

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
				       rif_index, true);
	if (err)
		goto err_vport_rif_sp_op;

	fid = mlxsw_sp_rif_sp_to_fid(rif_index);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	/* Counter allocation is best effort: failure is only logged and
	 * does not fail RIF creation.
	 */
	if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
						MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
		err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
						 MLXSW_SP_RIF_COUNTER_EGRESS);
		if (err)
			netdev_dbg(mlxsw_sp_vport->dev,
				   "Counter alloc Failed err=%d\n", err);
	}

	f->rif = rif;
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	return rif;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
err_vport_rif_sp_op:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
3074
/* Tear down the Sub-port RIF backing a vPort and release all resources
 * taken by mlxsw_sp_vport_rif_sp_create(), in reverse order of creation.
 * Must only be called once the FID's last user is gone (ref_count == 0).
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;
	/* Copy out the FID number now; 'f' is freed below but the FDB
	 * entry still needs to be removed afterwards.
	 */
	u16 fid = f->fid;

	/* Flush routes and neighbours that still point at this RIF. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	/* Release both direction counters; freeing an unallocated
	 * counter is handled by the helper.
	 */
	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);

	/* Unlink the RIF from the virtual router, the RIF table and the
	 * FID before freeing the structures.
	 */
	vr->rif_count--;
	mlxsw_sp->router->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	kfree(f);

	/* Remove the FDB entry that directed router-MAC traffic to the
	 * router port for this FID.
	 */
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	/* Destroy the RIF in hardware and drop the VR reference. */
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
	mlxsw_sp_vr_put(vr);
}
3104
3105static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3106 struct net_device *l3_dev)
3107{
3108 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003109 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003110
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003111 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3112 if (!rif) {
3113 rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
3114 if (IS_ERR(rif))
3115 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003116 }
3117
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003118 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
3119 rif->f->ref_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003120
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003121 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003122
3123 return 0;
3124}
3125
3126static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
3127{
3128 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3129
3130 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
3131
3132 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
3133 if (--f->ref_count == 0)
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003134 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003135}
3136
3137static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
3138 struct net_device *port_dev,
3139 unsigned long event, u16 vid)
3140{
3141 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
3142 struct mlxsw_sp_port *mlxsw_sp_vport;
3143
3144 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3145 if (WARN_ON(!mlxsw_sp_vport))
3146 return -EINVAL;
3147
3148 switch (event) {
3149 case NETDEV_UP:
3150 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
3151 case NETDEV_DOWN:
3152 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
3153 break;
3154 }
3155
3156 return 0;
3157}
3158
3159static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3160 unsigned long event)
3161{
Jiri Pirko2b94e582017-04-18 16:55:37 +02003162 if (netif_is_bridge_port(port_dev) ||
3163 netif_is_lag_port(port_dev) ||
3164 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003165 return 0;
3166
3167 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3168}
3169
3170static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3171 struct net_device *lag_dev,
3172 unsigned long event, u16 vid)
3173{
3174 struct net_device *port_dev;
3175 struct list_head *iter;
3176 int err;
3177
3178 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3179 if (mlxsw_sp_port_dev_check(port_dev)) {
3180 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
3181 event, vid);
3182 if (err)
3183 return err;
3184 }
3185 }
3186
3187 return 0;
3188}
3189
/* Handle an inetaddr event on a LAG master. A LAG enslaved to a bridge
 * is configured through the bridge instead, so it is skipped here.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	/* The LAG device is the L3 device; use VLAN 1 (PVID). */
	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
3198
3199static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
3200 struct net_device *l3_dev)
3201{
3202 u16 fid;
3203
3204 if (is_vlan_dev(l3_dev))
3205 fid = vlan_dev_vlan_id(l3_dev);
Ido Schimmel5f6935c2017-05-16 19:38:26 +02003206 else if (mlxsw_sp_master_bridge(mlxsw_sp)->dev == l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003207 fid = 1;
3208 else
3209 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
3210
3211 return mlxsw_sp_fid_find(mlxsw_sp, fid);
3212}
3213
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003214static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
3215{
3216 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
3217}
3218
Ido Schimmel4724ba562017-03-10 08:53:39 +01003219static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
3220{
3221 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
3222 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3223}
3224
3225static u16 mlxsw_sp_flood_table_index_get(u16 fid)
3226{
3227 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
3228}
3229
3230static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
3231 bool set)
3232{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003233 u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003234 enum mlxsw_flood_table_type table_type;
3235 char *sftr_pl;
3236 u16 index;
3237 int err;
3238
3239 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
3240 if (!sftr_pl)
3241 return -ENOMEM;
3242
3243 table_type = mlxsw_sp_flood_table_type_get(fid);
3244 index = mlxsw_sp_flood_table_index_get(fid);
3245 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003246 1, router_port, set);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003247 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
3248
3249 kfree(sftr_pl);
3250 return err;
3251}
3252
3253static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3254{
3255 if (mlxsw_sp_fid_is_vfid(fid))
3256 return MLXSW_REG_RITR_FID_IF;
3257 else
3258 return MLXSW_REG_RITR_VLAN_IF;
3259}
3260
Ido Schimmel69132292017-03-10 08:53:42 +01003261static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003262 struct net_device *l3_dev,
3263 u16 fid, u16 rif,
3264 bool create)
3265{
3266 enum mlxsw_reg_ritr_if_type rif_type;
3267 char ritr_pl[MLXSW_REG_RITR_LEN];
3268
3269 rif_type = mlxsw_sp_rif_type_get(fid);
Ido Schimmel69132292017-03-10 08:53:42 +01003270 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003271 l3_dev->dev_addr);
3272 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3273
3274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3275}
3276
/* Create a router interface for a bridge (or VLAN-over-bridge) device
 * backed by FID 'f': reserve a RIF index, bind a virtual router, enable
 * flooding to the router port, program the RIF and FDB entry in
 * hardware, and link the software RIF structure. On failure, everything
 * done so far is rolled back in reverse order via the goto chain.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return -ERANGE;

	/* Devices without an l3mdev table fall back to the main table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	/* Flood broadcast traffic of this FID to the router port. */
	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		goto err_port_flood_set;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
				     rif_index, true);
	if (err)
		goto err_rif_bridge_op;

	/* Direct frames with the router MAC to the router port. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->rif = rif;
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
	mlxsw_sp_vr_put(vr);
	return err;
}
3333
/* Destroy a bridge router interface and release everything acquired by
 * mlxsw_sp_rif_bridge_create(), in reverse order of creation.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
	/* Keep local copies; 'rif' is freed below but its device, FID
	 * and index are still needed for the hardware teardown.
	 */
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;

	/* Flush routes and neighbours that still point at this RIF. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	vr->rif_count--;
	mlxsw_sp->router->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	/* Remove the router-MAC FDB entry for this FID. */
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	/* Destroy the RIF in hardware. */
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);

	/* Stop flooding this FID's broadcast traffic to the router. */
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	mlxsw_sp_vr_put(vr);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
}
3361
3362static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3363 struct net_device *br_dev,
3364 unsigned long event)
3365{
3366 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3367 struct mlxsw_sp_fid *f;
3368
3369 /* FID can either be an actual FID if the L3 device is the
3370 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3371 * L3 device is a VLAN-unaware bridge and we get a vFID.
3372 */
3373 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3374 if (WARN_ON(!f))
3375 return -EINVAL;
3376
3377 switch (event) {
3378 case NETDEV_UP:
3379 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3380 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003381 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003382 break;
3383 }
3384
3385 return 0;
3386}
3387
3388static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3389 unsigned long event)
3390{
3391 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3392 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3393 u16 vid = vlan_dev_vlan_id(vlan_dev);
3394
3395 if (mlxsw_sp_port_dev_check(real_dev))
3396 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3397 vid);
3398 else if (netif_is_lag_master(real_dev))
3399 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3400 vid);
3401 else if (netif_is_bridge_master(real_dev) &&
Ido Schimmel5f6935c2017-05-16 19:38:26 +02003402 mlxsw_sp_master_bridge(mlxsw_sp)->dev == real_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003403 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3404 event);
3405
3406 return 0;
3407}
3408
Ido Schimmelb1e45522017-04-30 19:47:14 +03003409static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
3410 unsigned long event)
3411{
3412 if (mlxsw_sp_port_dev_check(dev))
3413 return mlxsw_sp_inetaddr_port_event(dev, event);
3414 else if (netif_is_lag_master(dev))
3415 return mlxsw_sp_inetaddr_lag_event(dev, event);
3416 else if (netif_is_bridge_master(dev))
3417 return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3418 else if (is_vlan_dev(dev))
3419 return mlxsw_sp_inetaddr_vlan_event(dev, event);
3420 else
3421 return 0;
3422}
3423
Ido Schimmel4724ba562017-03-10 08:53:39 +01003424int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3425 unsigned long event, void *ptr)
3426{
3427 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3428 struct net_device *dev = ifa->ifa_dev->dev;
3429 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003430 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003431 int err = 0;
3432
3433 mlxsw_sp = mlxsw_sp_lower_get(dev);
3434 if (!mlxsw_sp)
3435 goto out;
3436
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003437 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3438 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003439 goto out;
3440
Ido Schimmelb1e45522017-04-30 19:47:14 +03003441 err = __mlxsw_sp_inetaddr_event(dev, event);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003442out:
3443 return notifier_from_errno(err);
3444}
3445
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003446static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003447 const char *mac, int mtu)
3448{
3449 char ritr_pl[MLXSW_REG_RITR_LEN];
3450 int err;
3451
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003452 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003453 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3454 if (err)
3455 return err;
3456
3457 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3458 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3459 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3460 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3461}
3462
/* React to a MAC address or MTU change on a netdevice that has a RIF:
 * remove the old router-MAC FDB entry, update the RIF in hardware, add
 * the new FDB entry, and then sync the cached address/MTU. On failure
 * the previous configuration is restored via the goto chain.
 * Returns 0 or a negative errno.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	/* Devices without a RIF need no router reconfiguration. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	/* Remove the FDB entry keyed on the old MAC address. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Hardware is up to date; now update the cached state. */
	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
	return err;
}
3503
Ido Schimmelb1e45522017-04-30 19:47:14 +03003504static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3505 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003506{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003507 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003508
Ido Schimmelb1e45522017-04-30 19:47:14 +03003509 /* If netdev is already associated with a RIF, then we need to
3510 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01003511 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03003512 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3513 if (rif)
3514 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003515
Ido Schimmelb1e45522017-04-30 19:47:14 +03003516 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003517}
3518
Ido Schimmelb1e45522017-04-30 19:47:14 +03003519static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3520 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003521{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003522 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003523
Ido Schimmelb1e45522017-04-30 19:47:14 +03003524 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3525 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003526 return;
Ido Schimmelb1e45522017-04-30 19:47:14 +03003527 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003528}
3529
Ido Schimmelb1e45522017-04-30 19:47:14 +03003530int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3531 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003532{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003533 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3534 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003535
Ido Schimmelb1e45522017-04-30 19:47:14 +03003536 if (!mlxsw_sp)
3537 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003538
Ido Schimmelb1e45522017-04-30 19:47:14 +03003539 switch (event) {
3540 case NETDEV_PRECHANGEUPPER:
3541 return 0;
3542 case NETDEV_CHANGEUPPER:
3543 if (info->linking)
3544 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3545 else
3546 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3547 break;
3548 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003549
Ido Schimmelb1e45522017-04-30 19:47:14 +03003550 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003551}
3552
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003553static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3554{
Ido Schimmel7e39d112017-05-16 19:38:28 +02003555 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003556
3557 /* Flush pending FIB notifications and then flush the device's
3558 * table before requesting another dump. The FIB notification
3559 * block is unregistered, so no need to take RTNL.
3560 */
3561 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02003562 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
3563 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003564}
3565
Ido Schimmel4724ba562017-03-10 08:53:39 +01003566static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3567{
3568 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3569 u64 max_rifs;
3570 int err;
3571
3572 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3573 return -EIO;
3574
3575 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003576 mlxsw_sp->router->rifs = kcalloc(max_rifs,
3577 sizeof(struct mlxsw_sp_rif *),
3578 GFP_KERNEL);
3579 if (!mlxsw_sp->router->rifs)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003580 return -ENOMEM;
3581
3582 mlxsw_reg_rgcr_pack(rgcr_pl, true);
3583 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3584 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3585 if (err)
3586 goto err_rgcr_fail;
3587
3588 return 0;
3589
3590err_rgcr_fail:
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003591 kfree(mlxsw_sp->router->rifs);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003592 return err;
3593}
3594
3595static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3596{
3597 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3598 int i;
3599
3600 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3601 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3602
3603 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003604 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003605
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003606 kfree(mlxsw_sp->router->rifs);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003607}
3608
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003609int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3610{
Ido Schimmel9011b672017-05-16 19:38:25 +02003611 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003612 int err;
3613
Ido Schimmel9011b672017-05-16 19:38:25 +02003614 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
3615 if (!router)
3616 return -ENOMEM;
3617 mlxsw_sp->router = router;
3618 router->mlxsw_sp = mlxsw_sp;
3619
3620 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003621 err = __mlxsw_sp_router_init(mlxsw_sp);
3622 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02003623 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003624
Ido Schimmel9011b672017-05-16 19:38:25 +02003625 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003626 &mlxsw_sp_nexthop_ht_params);
3627 if (err)
3628 goto err_nexthop_ht_init;
3629
Ido Schimmel9011b672017-05-16 19:38:25 +02003630 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003631 &mlxsw_sp_nexthop_group_ht_params);
3632 if (err)
3633 goto err_nexthop_group_ht_init;
3634
Ido Schimmel8494ab02017-03-24 08:02:47 +01003635 err = mlxsw_sp_lpm_init(mlxsw_sp);
3636 if (err)
3637 goto err_lpm_init;
3638
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003639 err = mlxsw_sp_vrs_init(mlxsw_sp);
3640 if (err)
3641 goto err_vrs_init;
3642
Ido Schimmel8c9583a2016-10-27 15:12:57 +02003643 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003644 if (err)
3645 goto err_neigh_init;
3646
Ido Schimmel7e39d112017-05-16 19:38:28 +02003647 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
3648 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003649 mlxsw_sp_router_fib_dump_flush);
3650 if (err)
3651 goto err_register_fib_notifier;
3652
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003653 return 0;
3654
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003655err_register_fib_notifier:
3656 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003657err_neigh_init:
3658 mlxsw_sp_vrs_fini(mlxsw_sp);
3659err_vrs_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01003660 mlxsw_sp_lpm_fini(mlxsw_sp);
3661err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02003662 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003663err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02003664 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003665err_nexthop_ht_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003666 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02003667err_router_init:
3668 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003669 return err;
3670}
3671
/* Tear down the router subsystem in the exact reverse order of
 * mlxsw_sp_router_init(), starting by unregistering the FIB notifier so
 * no new events arrive during teardown.
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}