blob: 80345a1ddf173feff51822825e29ef79c376e74d [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020039#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020042#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010043#include <linux/inetdevice.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020044#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020045#include <net/neighbour.h>
46#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020047#include <net/ip_fib.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020048
49#include "spectrum.h"
50#include "core.h"
51#include "reg.h"
52
/* Router interface (RIF): the L3 representation of a netdev on the
 * switch. Holds the nexthops and neighbour entries associated with the
 * interface so they can be walked when the interface changes.
 */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops using this RIF — TODO confirm, list not used in this chunk */
	struct list_head neigh_list;	/* mlxsw_sp_neigh_entry::rif_list_node */
	struct net_device *dev;		/* backing kernel netdev */
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif;			/* RIF index used in device registers */
	u16 vr_id;			/* virtual router this RIF belongs to */
};
63
64static struct mlxsw_sp_rif *
65mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
66 const struct net_device *dev);
67
/* Iterate over every prefix length marked as used in @prefix_usage. */
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
70
71static bool
Jiri Pirko6b75c482016-07-04 08:23:09 +020072mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
73 struct mlxsw_sp_prefix_usage *prefix_usage2)
74{
75 unsigned char prefix;
76
77 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
78 if (!test_bit(prefix, prefix_usage2->b))
79 return false;
80 }
81 return true;
82}
83
84static bool
Jiri Pirko53342022016-07-04 08:23:08 +020085mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
86 struct mlxsw_sp_prefix_usage *prefix_usage2)
87{
88 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
89}
90
Jiri Pirko6b75c482016-07-04 08:23:09 +020091static bool
92mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
93{
94 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
95
96 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
97}
98
/* Copy @prefix_usage2 into @prefix_usage1. */
static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}
105
/* Mark @prefix_len as used in @prefix_usage. */
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}
112
/* Mark @prefix_len as unused in @prefix_usage. */
static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
119
/* Hash key of a FIB node: route prefix and prefix length. The address
 * buffer is sized for IPv6 so one key type serves both protocols.
 */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};
124
/* Forwarding semantics of a FIB entry as programmed to the device.
 * Exact per-type behavior is defined where entries are written to HW
 * (outside this chunk).
 */
enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};
130
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200131struct mlxsw_sp_nexthop_group;
132
/* One route prefix in a FIB table. Hashed by @key in the FIB's
 * rhashtable and linked on the FIB's node_list.
 */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* entries sharing this prefix */
	struct list_head list;		/* member of fib->node_list */
	struct rhash_head ht_node;	/* member of fib->ht */
	struct mlxsw_sp_fib *fib;	/* owning FIB table */
	struct mlxsw_sp_fib_key key;
};
140
/* Identifying parameters of an IPv4 route; presumably mirrored from
 * the kernel FIB entry — confirm against the FIB event handler.
 */
struct mlxsw_sp_fib_entry_params {
	u32 tb_id;	/* routing table ID */
	u32 prio;
	u8 tos;
	u8 type;
};
147
/* A single route entry hanging off a FIB node. */
struct mlxsw_sp_fib_entry {
	struct list_head list;			/* member of fib_node->entry_list */
	struct mlxsw_sp_fib_node *fib_node;	/* owning prefix node */
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;		/* NOTE(review): presumably set once programmed to HW — managed elsewhere */
};
157
/* One routing table (FIB) belonging to a virtual router. */
struct mlxsw_sp_fib {
	struct rhashtable ht;		/* nodes hashed by mlxsw_sp_fib_key */
	struct list_head node_list;	/* all nodes, for iteration */
	struct mlxsw_sp_vr *vr;		/* owning virtual router */
	struct mlxsw_sp_lpm_tree *lpm_tree;	/* currently bound LPM tree, if any */
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;	/* prefix lengths in use */
	enum mlxsw_sp_l3proto proto;
};
167
Ido Schimmel9aecce12017-02-09 10:28:42 +0100168static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200169
Ido Schimmel76610eb2017-03-10 08:53:41 +0100170static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
171 enum mlxsw_sp_l3proto proto)
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200172{
173 struct mlxsw_sp_fib *fib;
174 int err;
175
176 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
177 if (!fib)
178 return ERR_PTR(-ENOMEM);
179 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
180 if (err)
181 goto err_rhashtable_init;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100182 INIT_LIST_HEAD(&fib->node_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100183 fib->proto = proto;
184 fib->vr = vr;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200185 return fib;
186
187err_rhashtable_init:
188 kfree(fib);
189 return ERR_PTR(err);
190}
191
/* Free a FIB table. The caller must have emptied it first: remaining
 * nodes or a still-bound LPM tree indicate a leak, hence the WARNs.
 */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
199
Jiri Pirko53342022016-07-04 08:23:08 +0200200static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100201mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200202{
203 static struct mlxsw_sp_lpm_tree *lpm_tree;
204 int i;
205
206 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
207 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100208 if (lpm_tree->ref_count == 0)
209 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200210 }
211 return NULL;
212}
213
/* Allocate the LPM tree in the device via the RALTA register. */
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
224
/* Release the LPM tree in the device (RALTA with alloc=false). */
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
235
236static int
237mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
238 struct mlxsw_sp_prefix_usage *prefix_usage,
239 struct mlxsw_sp_lpm_tree *lpm_tree)
240{
241 char ralst_pl[MLXSW_REG_RALST_LEN];
242 u8 root_bin = 0;
243 u8 prefix;
244 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
245
246 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
247 root_bin = prefix;
248
249 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
250 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
251 if (prefix == 0)
252 continue;
253 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
254 MLXSW_REG_RALST_BIN_NO_CHILD);
255 last_prefix = prefix;
256 }
257 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
258}
259
260static struct mlxsw_sp_lpm_tree *
261mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
262 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100263 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200264{
265 struct mlxsw_sp_lpm_tree *lpm_tree;
266 int err;
267
Ido Schimmel382dbb42017-03-10 08:53:40 +0100268 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
Jiri Pirko53342022016-07-04 08:23:08 +0200269 if (!lpm_tree)
270 return ERR_PTR(-EBUSY);
271 lpm_tree->proto = proto;
272 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
273 if (err)
274 return ERR_PTR(err);
275
276 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
277 lpm_tree);
278 if (err)
279 goto err_left_struct_set;
Jiri Pirko2083d362016-10-25 11:25:56 +0200280 memcpy(&lpm_tree->prefix_usage, prefix_usage,
281 sizeof(lpm_tree->prefix_usage));
Jiri Pirko53342022016-07-04 08:23:08 +0200282 return lpm_tree;
283
284err_left_struct_set:
285 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
286 return ERR_PTR(err);
287}
288
/* Destroy an LPM tree; currently just frees the hardware tree. Kept
 * separate to mirror mlxsw_sp_lpm_tree_create().
 */
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
294
295static struct mlxsw_sp_lpm_tree *
296mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
297 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100298 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200299{
300 struct mlxsw_sp_lpm_tree *lpm_tree;
301 int i;
302
303 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
304 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
Jiri Pirko8b99bec2016-10-25 11:25:57 +0200305 if (lpm_tree->ref_count != 0 &&
306 lpm_tree->proto == proto &&
Jiri Pirko53342022016-07-04 08:23:08 +0200307 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
308 prefix_usage))
309 goto inc_ref_count;
310 }
311 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100312 proto);
Jiri Pirko53342022016-07-04 08:23:08 +0200313 if (IS_ERR(lpm_tree))
314 return lpm_tree;
315
316inc_ref_count:
317 lpm_tree->ref_count++;
318 return lpm_tree;
319}
320
/* Drop a reference taken by mlxsw_sp_lpm_tree_get(); destroys the
 * tree on the last put.
 */
static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}
328
/* Assign hardware tree IDs to the driver's LPM tree slots, starting
 * from MLXSW_SP_LPM_TREE_MIN.
 */
static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}
}
339
Ido Schimmel76610eb2017-03-10 08:53:41 +0100340static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
341{
342 return !!vr->fib4;
343}
344
Jiri Pirko6b75c482016-07-04 08:23:09 +0200345static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
346{
347 struct mlxsw_sp_vr *vr;
348 int i;
349
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200350 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200351 vr = &mlxsw_sp->router.vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100352 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200353 return vr;
354 }
355 return NULL;
356}
357
/* Bind the FIB's virtual router to the FIB's LPM tree (RALTB). */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
368
/* Detach the FIB's virtual router from its LPM tree. */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
379
380static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
381{
382 /* For our purpose, squash main and local table into one */
383 if (tb_id == RT_TABLE_LOCAL)
384 tb_id = RT_TABLE_MAIN;
385 return tb_id;
386}
387
388static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100389 u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200390{
391 struct mlxsw_sp_vr *vr;
392 int i;
393
394 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200395
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200396 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200397 vr = &mlxsw_sp->router.vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100398 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200399 return vr;
400 }
401 return NULL;
402}
403
/* Return the VR's FIB for @proto. Only IPv4 is implemented; asking
 * for IPv6 is a driver bug (BUG_ON).
 */
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}
415
416static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
417 u32 tb_id)
418{
Jiri Pirko6b75c482016-07-04 08:23:09 +0200419 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200420
421 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
422 if (!vr)
423 return ERR_PTR(-EBUSY);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100424 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
425 if (IS_ERR(vr->fib4))
426 return ERR_CAST(vr->fib4);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200427 vr->tb_id = tb_id;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200428 return vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200429}
430
/* Destroy the VR's IPv4 FIB and mark the slot unused (fib4 == NULL). */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
436
/* Make sure the FIB is bound to an LPM tree that can hold every prefix
 * length in @req_prefix_usage, swapping in a new tree when needed.
 * The new tree is bound over the existing binding before the old tree
 * is released, so forwarding never runs without a tree.
 */
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	/* Fast path: the bound tree already matches exactly. */
	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	/* Roll back: keep the old tree bound, drop the new reference. */
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
476
Ido Schimmel76610eb2017-03-10 08:53:41 +0100477static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200478{
479 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200480
481 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100482 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
483 if (!vr)
484 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200485 return vr;
486}
487
/* Release a VR obtained via mlxsw_sp_vr_get(): destroy it once nothing
 * references it — no RIFs and an empty IPv4 FIB.
 */
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}
493
Nogah Frankel9497c042016-09-20 11:16:54 +0200494static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200495{
496 struct mlxsw_sp_vr *vr;
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200497 u64 max_vrs;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200498 int i;
499
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200500 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
Nogah Frankel9497c042016-09-20 11:16:54 +0200501 return -EIO;
502
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200503 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
504 mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
Nogah Frankel9497c042016-09-20 11:16:54 +0200505 GFP_KERNEL);
506 if (!mlxsw_sp->router.vrs)
507 return -ENOMEM;
508
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200509 for (i = 0; i < max_vrs; i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200510 vr = &mlxsw_sp->router.vrs[i];
511 vr->id = i;
512 }
Nogah Frankel9497c042016-09-20 11:16:54 +0200513
514 return 0;
515}
516
Ido Schimmelac571de2016-11-14 11:26:32 +0100517static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
518
/* Tear down the virtual routers: drain pending FIB work, flush the
 * device's tables, then free the VR array.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router.vrs);
}
532
/* Hash key of a neighbour entry: the kernel neighbour it mirrors. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};
536
/* Driver-side mirror of a kernel neighbour. */
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* member of the RIF's neigh_list */
	struct rhash_head ht_node;	/* member of router.neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif;			/* RIF index of the egress netdev */
	bool connected;			/* NOTE(review): presumably mirrors neighbour reachability — set elsewhere */
	unsigned char ha[ETH_ALEN];	/* cached hardware address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;	/* member of router.nexthop_neighs_list */
};
549
/* Neighbour hash table layout: keyed by the kernel neighbour pointer
 * wrapped in struct mlxsw_sp_neigh_key.
 */
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
555
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100556static struct mlxsw_sp_neigh_entry *
557mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
558 u16 rif)
559{
560 struct mlxsw_sp_neigh_entry *neigh_entry;
561
562 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
563 if (!neigh_entry)
564 return NULL;
565
566 neigh_entry->key.n = n;
567 neigh_entry->rif = rif;
568 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
569
570 return neigh_entry;
571}
572
/* Counterpart of mlxsw_sp_neigh_entry_alloc(). */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
577
/* Add the entry to the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
586
/* Remove the entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
595
596static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100597mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200598{
599 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100600 struct mlxsw_sp_rif *r;
601 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200602
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100603 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
604 if (!r)
605 return ERR_PTR(-EINVAL);
606
607 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200608 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100609 return ERR_PTR(-ENOMEM);
610
611 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
612 if (err)
613 goto err_neigh_entry_insert;
614
Ido Schimmel9665b742017-02-08 11:16:42 +0100615 list_add(&neigh_entry->rif_list_node, &r->neigh_list);
616
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200617 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100618
619err_neigh_entry_insert:
620 mlxsw_sp_neigh_entry_free(neigh_entry);
621 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200622}
623
/* Undo mlxsw_sp_neigh_entry_create(): unlink from the RIF, remove
 * from the hash table and free.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
632
633static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +0100634mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200635{
Jiri Pirko33b13412016-11-10 12:31:04 +0100636 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200637
Jiri Pirko33b13412016-11-10 12:31:04 +0100638 key.n = n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200639 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
640 &key, mlxsw_sp_neigh_ht_params);
641}
642
/* Cache the kernel's ARP DELAY_PROBE_TIME (converted to msecs) as the
 * interval of the periodic neighbour activity update.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
}
650
651static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
652 char *rauhtd_pl,
653 int ent_index)
654{
655 struct net_device *dev;
656 struct neighbour *n;
657 __be32 dipn;
658 u32 dip;
659 u16 rif;
660
661 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
662
663 if (!mlxsw_sp->rifs[rif]) {
664 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
665 return;
666 }
667
668 dipn = htonl(dip);
669 dev = mlxsw_sp->rifs[rif]->dev;
670 n = neigh_lookup(&arp_tbl, &dipn, dev);
671 if (!n) {
672 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
673 &dip);
674 return;
675 }
676
677 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
678 neigh_event_send(n, NULL);
679 neigh_release(n);
680}
681
682static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
683 char *rauhtd_pl,
684 int rec_index)
685{
686 u8 num_entries;
687 int i;
688
689 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
690 rec_index);
691 /* Hardware starts counting at 0, so add 1. */
692 num_entries++;
693
694 /* Each record consists of several neighbour entries. */
695 for (i = 0; i < num_entries; i++) {
696 int ent_index;
697
698 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
699 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
700 ent_index);
701 }
702
703}
704
/* Dispatch a RAUHTD dump record by its type. Only IPv4 records are
 * expected; an IPv6 record indicates a driver/firmware inconsistency.
 */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
718
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100719static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
720{
721 u8 num_rec, last_rec_index, num_entries;
722
723 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
724 last_rec_index = num_rec - 1;
725
726 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
727 return false;
728 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
729 MLXSW_REG_RAUHTD_TYPE_IPV6)
730 return true;
731
732 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
733 last_rec_index);
734 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
735 return true;
736 return false;
737}
738
Yotam Gigib2157142016-07-05 11:27:51 +0200739static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
Yotam Gigic723c7352016-07-05 11:27:43 +0200740{
Yotam Gigic723c7352016-07-05 11:27:43 +0200741 char *rauhtd_pl;
742 u8 num_rec;
743 int i, err;
744
745 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
746 if (!rauhtd_pl)
Yotam Gigib2157142016-07-05 11:27:51 +0200747 return -ENOMEM;
Yotam Gigic723c7352016-07-05 11:27:43 +0200748
749 /* Make sure the neighbour's netdev isn't removed in the
750 * process.
751 */
752 rtnl_lock();
753 do {
754 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
755 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
756 rauhtd_pl);
757 if (err) {
758 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
759 break;
760 }
761 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
762 for (i = 0; i < num_rec; i++)
763 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
764 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100765 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +0200766 rtnl_unlock();
767
768 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +0200769 return err;
770}
771
/* Keep neighbours that back nexthops alive: signal each one as active
 * so the kernel does not expire it while routes still point at it.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
786
/* Re-arm the delayed work that syncs neighbour activity, using the
 * currently configured interval.
 */
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router.neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
			       msecs_to_jiffies(interval));
}
795
796static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
797{
798 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
799 router.neighs_update.dw.work);
800 int err;
801
802 err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
803 if (err)
804 dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
805
806 mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
807
Yotam Gigic723c7352016-07-05 11:27:43 +0200808 mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
809}
810
/* Periodic work: ARP-probe nexthop neighbours that are still
 * unresolved, then re-arm itself.
 */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.nexthop_probe_dw.work);

	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
835
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200836static void
837mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
838 struct mlxsw_sp_neigh_entry *neigh_entry,
839 bool removing);
840
/* Map an add/remove request to the corresponding RAUHT register opcode. */
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100841static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200842{
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100843	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
844			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
845}
846
/* Write (add or delete, per @op) an IPv4 neighbour entry to the device's
 * RAUHT table: RIF + MAC + destination IP taken from the kernel neighbour.
 */
847static void
848mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
849				struct mlxsw_sp_neigh_entry *neigh_entry,
850				enum mlxsw_reg_rauht_op op)
851{
Jiri Pirko33b13412016-11-10 12:31:04 +0100852	struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100853	u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200854	char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100855
856	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
857			      dip);
858	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
859}
860
/* Reflect a neighbour's connected state in hardware. Removing an entry that
 * was never programmed is a no-op; only IPv4 (arp_tbl) is supported here and
 * anything else trips a one-shot warning.
 */
861static void
862mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
863			    struct mlxsw_sp_neigh_entry *neigh_entry,
864			    bool adding)
865{
866	if (!adding && !neigh_entry->connected)
867		return;
868	neigh_entry->connected = adding;
869	if (neigh_entry->key.n->tbl == &arp_tbl)
870		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
871						mlxsw_sp_rauht_op(adding));
872	else
873		WARN_ON_ONCE(1);
874}
875
/* Context for deferring a NETEVENT_NEIGH_UPDATE from atomic notifier context
 * to process context; holds a reference on @n until the work runs.
 */
876struct mlxsw_sp_neigh_event_work {
877	struct work_struct work;
878	struct mlxsw_sp *mlxsw_sp;
879	struct neighbour *n;
880};
881
/* Process-context handler for a deferred neighbour update: snapshot the
 * neighbour's MAC/NUD state under its lock, then under RTNL create/update/
 * destroy the corresponding hardware entry and refresh dependent nexthops.
 */
882static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
883{
884	struct mlxsw_sp_neigh_event_work *neigh_work =
885		container_of(work, struct mlxsw_sp_neigh_event_work, work);
886	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
887	struct mlxsw_sp_neigh_entry *neigh_entry;
888	struct neighbour *n = neigh_work->n;
889	unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200890	bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +0100891	u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200892
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100893	/* If these parameters are changed after we release the lock,
894	 * then we are guaranteed to receive another event letting us
895	 * know about it.
896	 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200897	read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100898	memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200899	nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +0100900	dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200901	read_unlock_bh(&n->lock);
902
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100903	rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +0100904	entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100905	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
906	/* Nothing to tear down and nothing to program: bail out early. */
906	if (!entry_connected && !neigh_entry)
907		goto out;
908	if (!neigh_entry) {
909		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
910		if (IS_ERR(neigh_entry))
911			goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200912	}
913
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100914	memcpy(neigh_entry->ha, ha, ETH_ALEN);
915	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
916	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
917
	/* Disconnected entries with no nexthop users are no longer needed. */
918	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
919		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
920
921out:
922	rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200923	neigh_release(n);	/* drop the reference taken at event time */
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100924	kfree(neigh_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200925}
926
/* netevent notifier (runs in atomic context). Handles two events:
 * - DELAY_PROBE_TIME_UPDATE: mirror the ARP table's probe interval into the
 *   periodic activity-dump interval.
 * - NEIGH_UPDATE: defer the actual work to process context, holding a
 *   reference on the neighbour until the work item runs.
 */
Jiri Pirkoe7322632016-09-01 10:37:43 +0200927int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
928				   unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +0200929{
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100930	struct mlxsw_sp_neigh_event_work *neigh_work;
Yotam Gigic723c7352016-07-05 11:27:43 +0200931	struct mlxsw_sp_port *mlxsw_sp_port;
932	struct mlxsw_sp *mlxsw_sp;
933	unsigned long interval;
934	struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200935	struct neighbour *n;
Yotam Gigic723c7352016-07-05 11:27:43 +0200936
937	switch (event) {
938	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
939		p = ptr;
940
941		/* We don't care about changes in the default table. */
942		if (!p->dev || p->tbl != &arp_tbl)
943			return NOTIFY_DONE;
944
945		/* We are in atomic context and can't take RTNL mutex,
946		 * so use RCU variant to walk the device chain.
947		 */
948		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
949		if (!mlxsw_sp_port)
950			return NOTIFY_DONE;
951
952		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
953		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
954		mlxsw_sp->router.neighs_update.interval = interval;
955
956		mlxsw_sp_port_dev_put(mlxsw_sp_port);
957		break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200958	case NETEVENT_NEIGH_UPDATE:
959		n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200960
		/* Only IPv4 neighbours are offloaded by this driver. */
961		if (n->tbl != &arp_tbl)
962			return NOTIFY_DONE;
963
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100964		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200965		if (!mlxsw_sp_port)
966			return NOTIFY_DONE;
967
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100968		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
969		if (!neigh_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200970			mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100971			return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200972		}
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100973
974		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
975		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
976		neigh_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200977
978		/* Take a reference to ensure the neighbour won't be
979		 * destructed until we drop the reference in delayed
980		 * work.
981		 */
982		neigh_clone(n);
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100983		mlxsw_core_schedule_work(&neigh_work->work);
984		mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200985		break;
Yotam Gigic723c7352016-07-05 11:27:43 +0200986	}
987
988	return NOTIFY_DONE;
989}
990
/* Initialize neighbour offload: create the neighbour hashtable, seed the
 * activity-polling interval from the default ARP table, and kick off both
 * periodic works (activity dump + unresolved-nexthop probing).
 */
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200991static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
992{
Yotam Gigic723c7352016-07-05 11:27:43 +0200993	int err;
994
995	err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
996			      &mlxsw_sp_neigh_ht_params);
997	if (err)
998		return err;
999
1000	/* Initialize the polling interval according to the default
1001	 * table.
1002	 */
1003	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1004
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001005	/* Create the delayed works for the activity_update */
Yotam Gigic723c7352016-07-05 11:27:43 +02001006	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
1007			  mlxsw_sp_router_neighs_update_work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001008	INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
1009			  mlxsw_sp_router_probe_unresolved_nexthops);
Yotam Gigic723c7352016-07-05 11:27:43 +02001010	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001011	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02001012	return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001013}
1014
/* Tear down neighbour offload: stop both periodic works (waiting for any
 * in-flight run) and destroy the neighbour hashtable.
 */
1015static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
1016{
Yotam Gigic723c7352016-07-05 11:27:43 +02001017	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001018	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001019	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
1020}
1021
/* Ask the device to delete all RAUHT neighbour entries bound to RIF @r in
 * one shot (DELETE_ALL opcode).
 */
Ido Schimmel9665b742017-02-08 11:16:42 +01001022static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
1023				    const struct mlxsw_sp_rif *r)
1024{
1025	char rauht_pl[MLXSW_REG_RAUHT_LEN];
1026
1027	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
1028			     r->rif, r->addr);
1029	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1030}
1031
/* A RIF is going away: flush its neighbour entries from hardware, then
 * destroy every driver-side neighbour entry still attached to it
 * (_safe iteration because destroy unlinks entries from the list).
 */
1032static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
1033					 struct mlxsw_sp_rif *r)
1034{
1035	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
1036
1037	mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
1038	list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
1039				 rif_list_node)
1040		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1041}
1042
/* Hashtable key for a nexthop: the kernel's fib_nh it mirrors. */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001043struct mlxsw_sp_nexthop_key {
1044	struct fib_nh *fib_nh;
1045};
1046
/* Driver representation of a single nexthop within a nexthop group. */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001047struct mlxsw_sp_nexthop {
1048	struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01001049	struct list_head rif_list_node; /* member of the RIF's nexthop list */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001050	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1051						* this belongs to
1052						*/
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001053	struct rhash_head ht_node; /* member of router.nexthop_ht */
1054	struct mlxsw_sp_nexthop_key key;
Ido Schimmelb8399a12017-02-08 11:16:33 +01001055	struct mlxsw_sp_rif *r; /* egress RIF; NULL until bound */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001056	u8 should_offload:1, /* set indicates this neigh is connected and
1057			      * should be put to KVD linear area of this group.
1058			      */
1059	   offloaded:1, /* set in case the neigh is actually put into
1060			 * KVD linear area of this group.
1061			 */
1062	   update:1; /* set indicates that MAC of this neigh should be
1063		      * updated in HW
1064		      */
1065	struct mlxsw_sp_neigh_entry *neigh_entry;
1066};
1067
/* Hashtable key for a nexthop group: the kernel's fib_info it mirrors. */
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001068struct mlxsw_sp_nexthop_group_key {
1069	struct fib_info *fi;
1070};
1071
/* A set of nexthops sharing one contiguous KVD linear (adjacency) region;
 * trailing flexible array holds @count nexthops.
 */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001072struct mlxsw_sp_nexthop_group {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001073	struct rhash_head ht_node; /* member of router.nexthop_group_ht */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001074	struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001075	struct mlxsw_sp_nexthop_group_key key;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001076	u8 adj_index_valid:1,
1077	   gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001078	u32 adj_index; /* base index of the group's adjacency entries */
1079	u16 ecmp_size; /* number of adjacency entries allocated */
1080	u16 count; /* number of nexthops in the flexible array */
1081	struct mlxsw_sp_nexthop nexthops[0];
Ido Schimmelb8399a12017-02-08 11:16:33 +01001082#define nh_rif nexthops[0].r
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001083};
1084
/* rhashtable plumbing for nexthop groups, keyed by fib_info pointer:
 * params + insert/remove/lookup thin wrappers.
 */
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001085static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1086	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1087	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1088	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1089};
1090
1091static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1092					 struct mlxsw_sp_nexthop_group *nh_grp)
1093{
1094	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
1095				      &nh_grp->ht_node,
1096				      mlxsw_sp_nexthop_group_ht_params);
1097}
1098
1099static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1100					  struct mlxsw_sp_nexthop_group *nh_grp)
1101{
1102	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
1103			       &nh_grp->ht_node,
1104			       mlxsw_sp_nexthop_group_ht_params);
1105}
1106
1107static struct mlxsw_sp_nexthop_group *
1108mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1109			      struct mlxsw_sp_nexthop_group_key key)
1110{
1111	return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
1112				      mlxsw_sp_nexthop_group_ht_params);
1113}
1114
/* rhashtable plumbing for individual nexthops, keyed by fib_nh pointer:
 * params + insert/remove/lookup thin wrappers.
 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001115static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1116	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1117	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1118	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
1119};
1120
1121static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1122				   struct mlxsw_sp_nexthop *nh)
1123{
1124	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
1125				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1126}
1127
1128static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1129				    struct mlxsw_sp_nexthop *nh)
1130{
1131	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
1132			       mlxsw_sp_nexthop_ht_params);
1133}
1134
Ido Schimmelad178c82017-02-08 11:16:40 +01001135static struct mlxsw_sp_nexthop *
1136mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1137			struct mlxsw_sp_nexthop_key key)
1138{
1139	return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
1140				      mlxsw_sp_nexthop_ht_params);
1141}
1142
/* Tell the device (RALEU register) to repoint all routes of one FIB/VR from
 * the old adjacency region to the new one in a single operation.
 */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001143static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001144					     const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001145					     u32 adj_index, u16 ecmp_size,
1146					     u32 new_adj_index,
1147					     u16 new_ecmp_size)
1148{
1149	char raleu_pl[MLXSW_REG_RALEU_LEN];
1150
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001151	mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001152			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
1153			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001154			     new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001155	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1156}
1157
/* Repoint every FIB that has entries using @nh_grp from the old adjacency
 * region to the group's new one. Relies on entries of the same FIB being
 * adjacent in fib_list, so each FIB is updated only once.
 */
1158static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1159					  struct mlxsw_sp_nexthop_group *nh_grp,
1160					  u32 old_adj_index, u16 old_ecmp_size)
1161{
1162	struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001163	struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001164	int err;
1165
1166	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		/* Skip consecutive entries that share the FIB just updated. */
Ido Schimmel76610eb2017-03-10 08:53:41 +01001167		if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001168			continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001169		fib = fib_entry->fib_node->fib;
1170		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001171							old_adj_index,
1172							old_ecmp_size,
1173							nh_grp->adj_index,
1174							nh_grp->ecmp_size);
1175		if (err)
1176			return err;
1177	}
1178	return 0;
1179}
1180
/* Write one adjacency entry (RATR register) at @adj_index with the nexthop
 * neighbour's RIF and MAC address.
 */
1181static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1182				       struct mlxsw_sp_nexthop *nh)
1183{
1184	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1185	char ratr_pl[MLXSW_REG_RATR_LEN];
1186
1187	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1188			    true, adj_index, neigh_entry->rif);
1189	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1190	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1191}
1192
/* Walk the group's nexthops and program adjacency entries for those that
 * should be offloaded. @reallocate forces a rewrite of every offloaded entry
 * (used after the group moved to a fresh adjacency region); otherwise only
 * entries flagged ->update are rewritten. Adjacency indexes are consumed
 * sequentially from the group's base.
 */
1193static int
1194mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
Ido Schimmela59b7e02017-01-23 11:11:42 +01001195				  struct mlxsw_sp_nexthop_group *nh_grp,
1196				  bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001197{
1198	u32 adj_index = nh_grp->adj_index; /* base */
1199	struct mlxsw_sp_nexthop *nh;
1200	int i;
1201	int err;
1202
1203	for (i = 0; i < nh_grp->count; i++) {
1204		nh = &nh_grp->nexthops[i];
1205
1206		if (!nh->should_offload) {
1207			nh->offloaded = 0;
1208			continue;
1209		}
1210
Ido Schimmela59b7e02017-01-23 11:11:42 +01001211		if (nh->update || reallocate) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001212			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1213							  adj_index, nh);
1214			if (err)
1215				return err;
1216			nh->update = 0;
1217			nh->offloaded = 1;
1218		}
1219		adj_index++;
1220	}
1221	return 0;
1222}
1223
1224static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1225 struct mlxsw_sp_fib_entry *fib_entry);
1226
/* Re-program every FIB entry that uses @nh_grp, propagating the group's
 * current state (adjacency index vs. trap) to hardware.
 */
1227static int
1228mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1229				    struct mlxsw_sp_nexthop_group *nh_grp)
1230{
1231	struct mlxsw_sp_fib_entry *fib_entry;
1232	int err;
1233
1234	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1235		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1236		if (err)
1237			return err;
1238	}
1239	return 0;
1240}
1241
/* Reconcile a nexthop group's hardware state with its nexthops' state.
 * Non-gateway groups just refresh their FIB entries. Gateway groups:
 * - if the set of offloadable nexthops is unchanged, only rewrite MACs;
 * - otherwise allocate a new KVD linear region sized to the live nexthops,
 *   program it, switch routes over (mass update), and free the old region;
 * - on any failure, or when no nexthop is resolvable, fall back to trapping
 *   packets to the kernel (set_trap path).
 */
1242static void
1243mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1244			       struct mlxsw_sp_nexthop_group *nh_grp)
1245{
1246	struct mlxsw_sp_nexthop *nh;
1247	bool offload_change = false;
1248	u32 adj_index;
1249	u16 ecmp_size = 0;
1250	bool old_adj_index_valid;
1251	u32 old_adj_index;
1252	u16 old_ecmp_size;
1253	int ret;
1254	int i;
1255	int err;
1256
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001257	if (!nh_grp->gateway) {
1258		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1259		return;
1260	}
1261
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001262	for (i = 0; i < nh_grp->count; i++) {
1263		nh = &nh_grp->nexthops[i];
1264
1265		if (nh->should_offload ^ nh->offloaded) {
1266			offload_change = true;
1267			if (nh->should_offload)
1268				nh->update = 1;
1269		}
1270		if (nh->should_offload)
1271			ecmp_size++;
1272	}
1273	if (!offload_change) {
1274		/* Nothing was added or removed, so no need to reallocate. Just
1275		 * update MAC on existing adjacency indexes.
1276		 */
Ido Schimmela59b7e02017-01-23 11:11:42 +01001277		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1278							false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001279		if (err) {
1280			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1281			goto set_trap;
1282		}
1283		return;
1284	}
1285	if (!ecmp_size)
1286		/* No neigh of this group is connected so we just set
1287		 * the trap and let everthing flow through kernel.
1288		 */
1289		goto set_trap;
1290
1291	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
1292	if (ret < 0) {
1293		/* We ran out of KVD linear space, just set the
1294		 * trap and let everything flow through kernel.
1295		 */
1296		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1297		goto set_trap;
1298	}
1299	adj_index = ret;
1300	old_adj_index_valid = nh_grp->adj_index_valid;
1301	old_adj_index = nh_grp->adj_index;
1302	old_ecmp_size = nh_grp->ecmp_size;
1303	nh_grp->adj_index_valid = 1;
1304	nh_grp->adj_index = adj_index;
1305	nh_grp->ecmp_size = ecmp_size;
Ido Schimmela59b7e02017-01-23 11:11:42 +01001306	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001307	if (err) {
1308		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1309		goto set_trap;
1310	}
1311
1312	if (!old_adj_index_valid) {
1313		/* The trap was set for fib entries, so we have to call
1314		 * fib entry update to unset it and use adjacency index.
1315		 */
1316		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1317		if (err) {
1318			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1319			goto set_trap;
1320		}
1321		return;
1322	}
1323
1324	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1325					     old_adj_index, old_ecmp_size);
1326	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1327	if (err) {
1328		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1329		goto set_trap;
1330	}
1331	return;
1332
1333set_trap:
1334	old_adj_index_valid = nh_grp->adj_index_valid;
1335	nh_grp->adj_index_valid = 0;
1336	for (i = 0; i < nh_grp->count; i++) {
1337		nh = &nh_grp->nexthops[i];
1338		nh->offloaded = 0;
1339	}
1340	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1341	if (err)
1342		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1343	if (old_adj_index_valid)
1344		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1345}
1346
/* Update one nexthop's should_offload flag when its neighbour connects
 * (@removing == false) or disconnects (@removing == true), and flag its
 * adjacency entry for a MAC rewrite.
 */
1347static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1348					    bool removing)
1349{
1350	if (!removing && !nh->should_offload)
1351		nh->should_offload = 1;
1352	else if (removing && nh->offloaded)
1353		nh->should_offload = 0;
1354	nh->update = 1;
1355}
1356
/* Propagate a neighbour state change to every nexthop that uses it, then
 * refresh each affected nexthop group in hardware.
 */
1357static void
1358mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1359			      struct mlxsw_sp_neigh_entry *neigh_entry,
1360			      bool removing)
1361{
1362	struct mlxsw_sp_nexthop *nh;
1363
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001364	list_for_each_entry(nh, &neigh_entry->nexthop_list,
1365			    neigh_list_node) {
1366		__mlxsw_sp_nexthop_neigh_update(nh, removing);
1367		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1368	}
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001369}
1370
/* Bind/unbind a nexthop to a router interface. Both helpers are idempotent:
 * init is a no-op if already bound, fini if not bound.
 */
Ido Schimmel9665b742017-02-08 11:16:42 +01001371static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
1372				      struct mlxsw_sp_rif *r)
1373{
1374	if (nh->r)
1375		return;
1376
1377	nh->r = r;
1378	list_add(&nh->rif_list_node, &r->nexthop_list);
1379}
1380
1381static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1382{
1383	if (!nh->r)
1384		return;
1385
1386	list_del(&nh->rif_list_node);
1387	nh->r = NULL;
1388}
1389
/* Attach a gateway nexthop to its neighbour entry: look up (or create) the
 * kernel neighbour for the nexthop's gateway, look up (or create) the driver
 * neigh entry, link the nexthop to it and seed should_offload from the
 * neighbour's current NUD state. No-op for non-gateway groups or if already
 * attached. Holds a neighbour reference until mlxsw_sp_nexthop_neigh_fini().
 */
Ido Schimmela8c97012017-02-08 11:16:35 +01001390static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1391				       struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001392{
1393	struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01001394	struct fib_nh *fib_nh = nh->key.fib_nh;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001395	struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001396	u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001397	int err;
1398
Ido Schimmelad178c82017-02-08 11:16:40 +01001399	if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01001400		return 0;
1401
Jiri Pirko33b13412016-11-10 12:31:04 +01001402	/* Take a reference of neigh here ensuring that neigh would
1403	 * not be detructed before the nexthop entry is finished.
1404	 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01001405	 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01001406	 */
Ido Schimmela8c97012017-02-08 11:16:35 +01001407	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01001408	if (!n) {
Ido Schimmela8c97012017-02-08 11:16:35 +01001409		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1410		if (IS_ERR(n))
1411			return PTR_ERR(n);
		/* Kick off resolution of the freshly created neighbour. */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001412		neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01001413	}
1414	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1415	if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001416		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1417		if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001418			err = -EINVAL;
1419			goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001420		}
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001421	}
Yotam Gigib2157142016-07-05 11:27:51 +02001422
1423	/* If that is the first nexthop connected to that neigh, add to
1424	 * nexthop_neighs_list
1425	 */
1426	if (list_empty(&neigh_entry->nexthop_list))
1427		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
1428			      &mlxsw_sp->router.nexthop_neighs_list);
1429
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001430	nh->neigh_entry = neigh_entry;
1431	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1432	read_lock_bh(&n->lock);
1433	nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001434	dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001435	read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01001436	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001437
1438	return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001439
1440err_neigh_entry_create:
1441	neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001442	return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001443}
1444
/* Detach a nexthop from its neighbour entry (no-op if never attached), mark
 * it not-offloadable, destroy the neigh entry if it became unused, and drop
 * the neighbour reference taken in mlxsw_sp_nexthop_neigh_init().
 */
Ido Schimmela8c97012017-02-08 11:16:35 +01001445static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
1446					struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001447{
1448	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01001449	struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001450
Ido Schimmelb8399a12017-02-08 11:16:33 +01001451	if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01001452		return;
1453	n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01001454
Ido Schimmel58312122016-12-23 09:32:50 +01001455	__mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001456	list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01001457	nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02001458
1459	/* If that is the last nexthop connected to that neigh, remove from
1460	 * nexthop_neighs_list
1461	 */
Ido Schimmele58be792017-02-08 11:16:28 +01001462	if (list_empty(&neigh_entry->nexthop_list))
1463		list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02001464
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001465	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1466		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1467
1468	neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01001469}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001470
/* Initialize a nexthop from its kernel fib_nh: insert it in the nexthop
 * hashtable, and — when the nexthop has a device, is not ignored due to
 * linkdown, and its device has a RIF — bind it to the RIF and attach its
 * neighbour. mlxsw_sp_nexthop_fini() undoes all of this in reverse order.
 */
Ido Schimmela8c97012017-02-08 11:16:35 +01001471static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1472				 struct mlxsw_sp_nexthop_group *nh_grp,
1473				 struct mlxsw_sp_nexthop *nh,
1474				 struct fib_nh *fib_nh)
1475{
1476	struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001477	struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01001478	struct mlxsw_sp_rif *r;
1479	int err;
1480
1481	nh->nh_grp = nh_grp;
1482	nh->key.fib_nh = fib_nh;
1483	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1484	if (err)
1485		return err;
1486
Ido Schimmel97989ee2017-03-10 08:53:38 +01001487	if (!dev)
1488		return 0;
1489
	/* Respect ignore_routes_with_linkdown: don't offload a dead nexthop. */
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001490	in_dev = __in_dev_get_rtnl(dev);
1491	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1492	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
1493		return 0;
1494
Ido Schimmela8c97012017-02-08 11:16:35 +01001495	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1496	if (!r)
1497		return 0;
Ido Schimmel9665b742017-02-08 11:16:42 +01001498	mlxsw_sp_nexthop_rif_init(nh, r);
Ido Schimmela8c97012017-02-08 11:16:35 +01001499
1500	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1501	if (err)
1502		goto err_nexthop_neigh_init;
1503
1504	return 0;
1505
1506err_nexthop_neigh_init:
1507	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1508	return err;
1509}
1510
1511static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1512				  struct mlxsw_sp_nexthop *nh)
1513{
1514	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01001515	mlxsw_sp_nexthop_rif_fini(nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001516	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001517}
1518
/* FIB notifier handler for NH_ADD/NH_DEL on a single fib_nh: (de)attach the
 * matching driver nexthop to its RIF and neighbour, then refresh its group.
 * Ignored entirely once the router is in aborted state.
 */
Ido Schimmelad178c82017-02-08 11:16:40 +01001519static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1520				   unsigned long event, struct fib_nh *fib_nh)
1521{
1522	struct mlxsw_sp_nexthop_key key;
1523	struct mlxsw_sp_nexthop *nh;
1524	struct mlxsw_sp_rif *r;
1525
1526	if (mlxsw_sp->router.aborted)
1527		return;
1528
1529	key.fib_nh = fib_nh;
1530	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
1531	if (WARN_ON_ONCE(!nh))
1532		return;
1533
1534	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1535	if (!r)
1536		return;
1537
1538	switch (event) {
1539	case FIB_EVENT_NH_ADD:
Ido Schimmel9665b742017-02-08 11:16:42 +01001540		mlxsw_sp_nexthop_rif_init(nh, r);
Ido Schimmelad178c82017-02-08 11:16:40 +01001541		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1542		break;
1543	case FIB_EVENT_NH_DEL:
1544		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01001545		mlxsw_sp_nexthop_rif_fini(nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01001546		break;
1547	}
1548
1549	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1550}
1551
/* A RIF is going away: detach every nexthop bound to it from its neighbour
 * and RIF, refreshing each affected group (_safe because fini unlinks nodes).
 */
Ido Schimmel9665b742017-02-08 11:16:42 +01001552static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
1553					   struct mlxsw_sp_rif *r)
1554{
1555	struct mlxsw_sp_nexthop *nh, *tmp;
1556
1557	list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
1558		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1559		mlxsw_sp_nexthop_rif_fini(nh);
1560		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1561	}
1562}
1563
/* Allocate and initialize a nexthop group mirroring kernel @fi: one driver
 * nexthop per fib_nh, inserted into the group hashtable, then programmed via
 * group refresh. On nexthop init failure, already-initialized nexthops are
 * unwound in reverse order. Returns the group or ERR_PTR().
 */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001564static struct mlxsw_sp_nexthop_group *
1565mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1566{
1567	struct mlxsw_sp_nexthop_group *nh_grp;
1568	struct mlxsw_sp_nexthop *nh;
1569	struct fib_nh *fib_nh;
1570	size_t alloc_size;
1571	int i;
1572	int err;
1573
1574	alloc_size = sizeof(*nh_grp) +
1575		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1576	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1577	if (!nh_grp)
1578		return ERR_PTR(-ENOMEM);
1579	INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001580	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001581	nh_grp->count = fi->fib_nhs;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001582	nh_grp->key.fi = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001583	for (i = 0; i < nh_grp->count; i++) {
1584		nh = &nh_grp->nexthops[i];
1585		fib_nh = &fi->fib_nh[i];
1586		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1587		if (err)
1588			goto err_nexthop_init;
1589	}
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001590	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
1591	if (err)
1592		goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001593	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1594	return nh_grp;
1595
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001596err_nexthop_group_insert:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001597err_nexthop_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001598	for (i--; i >= 0; i--) {
1599		nh = &nh_grp->nexthops[i];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001600		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001601	}
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001602	kfree(nh_grp);
1603	return ERR_PTR(err);
1604}
1605
/* Tear down a nexthop group: unhash it, finalize each nexthop, then refresh
 * once more so the (now empty) group releases its adjacency entries — the
 * WARN checks that the KVD linear region was indeed freed.
 */
1606static void
1607mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1608			       struct mlxsw_sp_nexthop_group *nh_grp)
1609{
1610	struct mlxsw_sp_nexthop *nh;
1611	int i;
1612
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001613	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001614	for (i = 0; i < nh_grp->count; i++) {
1615		nh = &nh_grp->nexthops[i];
1616		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1617	}
Ido Schimmel58312122016-12-23 09:32:50 +01001618	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1619	WARN_ON_ONCE(nh_grp->adj_index_valid);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001620	kfree(nh_grp);
1621}
1622
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001623static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1624 struct mlxsw_sp_fib_entry *fib_entry,
1625 struct fib_info *fi)
1626{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001627 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001628 struct mlxsw_sp_nexthop_group *nh_grp;
1629
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001630 key.fi = fi;
1631 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001632 if (!nh_grp) {
1633 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1634 if (IS_ERR(nh_grp))
1635 return PTR_ERR(nh_grp);
1636 }
1637 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1638 fib_entry->nh_group = nh_grp;
1639 return 0;
1640}
1641
1642static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1643 struct mlxsw_sp_fib_entry *fib_entry)
1644{
1645 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1646
1647 list_del(&fib_entry->nexthop_group_node);
1648 if (!list_empty(&nh_grp->fib_list))
1649 return;
1650 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1651}
1652
Ido Schimmel013b20f2017-02-08 11:16:36 +01001653static bool
1654mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1655{
1656 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1657
Ido Schimmel9aecce12017-02-09 10:28:42 +01001658 if (fib_entry->params.tos)
1659 return false;
1660
Ido Schimmel013b20f2017-02-08 11:16:36 +01001661 switch (fib_entry->type) {
1662 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1663 return !!nh_group->adj_index_valid;
1664 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001665 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001666 default:
1667 return false;
1668 }
1669}
1670
/* Mark the entry as offloaded and bump the offload count on the
 * associated fib_info so the kernel can report the route as offloaded.
 */
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 offload is not implemented at this point */
		WARN_ON_ONCE(1);
	}
}
1683
/* Inverse of mlxsw_sp_fib_entry_offload_set(): drop the fib_info offload
 * count and clear the entry's offloaded flag.
 */
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_dec(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 offload is not implemented at this point */
		WARN_ON_ONCE(1);
	}

	fib_entry->offloaded = false;
}
1697
1698static void
1699mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
1700 enum mlxsw_reg_ralue_op op, int err)
1701{
1702 switch (op) {
1703 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
1704 if (!fib_entry->offloaded)
1705 return;
1706 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
1707 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
1708 if (err)
1709 return;
1710 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1711 !fib_entry->offloaded)
1712 mlxsw_sp_fib_entry_offload_set(fib_entry);
1713 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1714 fib_entry->offloaded)
1715 mlxsw_sp_fib_entry_offload_unset(fib_entry);
1716 return;
1717 default:
1718 return;
1719 }
1720}
1721
/* Program a remote (gateway) IPv4 route via the RALUE register.
 * When the nexthop group is not ready, the route is installed as a trap
 * so traffic keeps flowing through the kernel.
 */
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1755
/* Program a directly-connected (local) IPv4 route via the RALUE
 * register. Forwards to the nexthop RIF when offloadable, otherwise
 * traps the traffic to the kernel.
 */
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif = r->rif;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1783
/* Program an IPv4 route that unconditionally sends matching packets to
 * the CPU (ip2me action), used for host-directed and broadcast routes.
 */
static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1799
1800static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
1801 struct mlxsw_sp_fib_entry *fib_entry,
1802 enum mlxsw_reg_ralue_op op)
1803{
1804 switch (fib_entry->type) {
1805 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001806 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001807 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1808 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
1809 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
1810 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
1811 }
1812 return -EINVAL;
1813}
1814
1815static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1816 struct mlxsw_sp_fib_entry *fib_entry,
1817 enum mlxsw_reg_ralue_op op)
1818{
Ido Schimmel013b20f2017-02-08 11:16:36 +01001819 int err = -EINVAL;
1820
Ido Schimmel76610eb2017-03-10 08:53:41 +01001821 switch (fib_entry->fib_node->fib->proto) {
Jiri Pirko61c503f2016-07-04 08:23:11 +02001822 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel013b20f2017-02-08 11:16:36 +01001823 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
1824 break;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001825 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel013b20f2017-02-08 11:16:36 +01001826 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001827 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01001828 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
1829 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001830}
1831
/* Write (create or update) the entry in the device's routing table */
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
1838
/* Remove the entry from the device's routing table */
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
1845
/* Map the kernel route type from the FIB notification to the driver's
 * entry type, which selects the RALUE action used when programming the
 * route. Returns -EINVAL for route types that cannot be handled.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		/* Host-directed traffic: always send to the CPU */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		/* Gateway routes are remote, directly-connected routes
		 * are local.
		 */
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
1877
Jiri Pirko5b004412016-09-01 10:37:40 +02001878static struct mlxsw_sp_fib_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01001879mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
1880 struct mlxsw_sp_fib_node *fib_node,
1881 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02001882{
1883 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001884 int err;
1885
1886 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
1887 if (!fib_entry) {
1888 err = -ENOMEM;
1889 goto err_fib_entry_alloc;
1890 }
1891
1892 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
1893 if (err)
1894 goto err_fib4_entry_type_set;
1895
1896 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
1897 if (err)
1898 goto err_nexthop_group_get;
1899
1900 fib_entry->params.prio = fen_info->fi->fib_priority;
1901 fib_entry->params.tb_id = fen_info->tb_id;
1902 fib_entry->params.type = fen_info->type;
1903 fib_entry->params.tos = fen_info->tos;
1904
1905 fib_entry->fib_node = fib_node;
1906
1907 return fib_entry;
1908
1909err_nexthop_group_get:
1910err_fib4_entry_type_set:
1911 kfree(fib_entry);
1912err_fib_entry_alloc:
1913 return ERR_PTR(err);
1914}
1915
/* Release the entry's nexthop group reference and free it */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}
1922
1923static struct mlxsw_sp_fib_node *
1924mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
1925 const struct fib_entry_notifier_info *fen_info);
1926
1927static struct mlxsw_sp_fib_entry *
1928mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
1929 const struct fib_entry_notifier_info *fen_info)
1930{
1931 struct mlxsw_sp_fib_entry *fib_entry;
1932 struct mlxsw_sp_fib_node *fib_node;
1933
1934 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
1935 if (IS_ERR(fib_node))
1936 return NULL;
1937
1938 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
1939 if (fib_entry->params.tb_id == fen_info->tb_id &&
1940 fib_entry->params.tos == fen_info->tos &&
1941 fib_entry->params.type == fen_info->type &&
1942 fib_entry->nh_group->key.fi == fen_info->fi) {
1943 return fib_entry;
1944 }
1945 }
1946
1947 return NULL;
1948}
1949
/* FIB nodes are hashed by their full key (address + prefix length);
 * the comparison is over the raw key bytes.
 */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
1956
/* Add the node to the FIB's hash table for prefix lookup */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
1963
/* Remove the node from the FIB's hash table */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
1970
/* Look up a FIB node by prefix. The key must be fully zeroed before
 * filling it, since the hash table hashes and compares the raw key
 * bytes and @addr_len may be shorter than the key's address field.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
1982
1983static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01001984mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01001985 size_t addr_len, unsigned char prefix_len)
1986{
1987 struct mlxsw_sp_fib_node *fib_node;
1988
1989 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
1990 if (!fib_node)
1991 return NULL;
1992
1993 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01001994 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01001995 memcpy(fib_node->key.addr, addr, addr_len);
1996 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001997
1998 return fib_node;
1999}
2000
/* Unlink the node from the FIB's node list and free it. The node must
 * no longer hold any FIB entries.
 */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
2007
2008static bool
2009mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2010 const struct mlxsw_sp_fib_entry *fib_entry)
2011{
2012 return list_first_entry(&fib_node->entry_list,
2013 struct mlxsw_sp_fib_entry, list) == fib_entry;
2014}
2015
2016static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2017{
2018 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002019 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002020
2021 if (fib->prefix_ref_count[prefix_len]++ == 0)
2022 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2023}
2024
2025static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2026{
2027 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002028 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002029
2030 if (--fib->prefix_ref_count[prefix_len] == 0)
2031 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2032}
2033
Ido Schimmel76610eb2017-03-10 08:53:41 +01002034static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2035 struct mlxsw_sp_fib_node *fib_node,
2036 struct mlxsw_sp_fib *fib)
2037{
2038 struct mlxsw_sp_prefix_usage req_prefix_usage;
2039 struct mlxsw_sp_lpm_tree *lpm_tree;
2040 int err;
2041
2042 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2043 if (err)
2044 return err;
2045 fib_node->fib = fib;
2046
2047 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2048 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2049
2050 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2051 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2052 &req_prefix_usage);
2053 if (err)
2054 goto err_tree_check;
2055 } else {
2056 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2057 fib->proto);
2058 if (IS_ERR(lpm_tree))
2059 return PTR_ERR(lpm_tree);
2060 fib->lpm_tree = lpm_tree;
2061 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2062 if (err)
2063 goto err_tree_bind;
2064 }
2065
2066 mlxsw_sp_fib_node_prefix_inc(fib_node);
2067
2068 return 0;
2069
2070err_tree_bind:
2071 fib->lpm_tree = NULL;
2072 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2073err_tree_check:
2074 fib_node->fib = NULL;
2075 mlxsw_sp_fib_node_remove(fib, fib_node);
2076 return err;
2077}
2078
/* Inverse of mlxsw_sp_fib_node_init(): drop the prefix-length
 * reference, unbind and release the LPM tree if this was the FIB's last
 * prefix (or shrink the tree's usage otherwise), and remove the node
 * from the hash table.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		/* Last prefix gone: release the tree entirely */
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		/* Possibly migrate to a smaller tree; failure here is
		 * deliberately ignored since the current tree still works.
		 */
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
2098
/* Get the FIB node for the notified route's prefix, creating it (and
 * initializing its LPM state) on first use.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	/* NOTE(review): the early return below performs no vr_put();
	 * this relies on mlxsw_sp_vr_put() being usage-based rather than
	 * a strict refcount — confirm against mlxsw_sp_vr_get()/put().
	 */
	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2139
/* Release a FIB node: if no entries remain on it, tear down its LPM
 * state, destroy it and drop the virtual router it lived in.
 */
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	/* Grab the VR before the node (and its fib pointer) is freed */
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}
2151
/* Find the entry before which a new entry with @params should be
 * inserted. The node's list is kept sorted by table ID (descending),
 * then TOS (descending), then priority (ascending). Returns NULL when
 * the new entry belongs at the end of its table's run (or the list).
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id > params->tb_id)
			continue;	/* still in a higher table's run */
		if (fib_entry->params.tb_id != params->tb_id)
			break;		/* passed our table's run */
		if (fib_entry->params.tos > params->tos)
			continue;	/* still in a higher TOS run */
		/* Same table: insert before the first entry with lower
		 * TOS or with priority not better than ours.
		 */
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}
2172
/* Append @new_entry at the end of the run of entries that share
 * @fib_entry's (tb_id, tos, prio), implementing NLM_F_APPEND semantics.
 * @fib_entry is the insertion point found by fib4_node_entry_find() and
 * must exist for an append to make sense.
 */
static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	/* Skip past all entries with identical ordering parameters */
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	/* Insert before the first differing entry (or list head) */
	list_add_tail(&new_entry->list, &fib_entry->list);
	return 0;
}
2192
/* Insert @new_entry into the node's sorted entry list, honouring the
 * netlink replace/append semantics of the originating request.
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		/* No insertion point: place at the end of the run of
		 * entries belonging to our table ID.
		 */
		struct mlxsw_sp_fib_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}
2229
/* Remove the entry from its node's entry list */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
2235
/* Program @fib_entry into the device if it became the node's first
 * (highest precedence) entry. Only the first entry of a node is ever
 * offloaded.
 */
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		/* The demoted entry is overwritten, not deleted from the
		 * device, so only its offload indication is refreshed.
		 */
		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
2256
/* Remove @fib_entry from the device if it was the node's programmed
 * (first) entry, promoting the next entry in its place when one exists.
 */
static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		/* The old entry was overwritten in hardware; only its
		 * offload indication needs to be cleared.
		 */
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	/* Last entry on the node: actually delete it from the device */
	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
2277
2278static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002279 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002280 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002281{
2282 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2283 int err;
2284
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002285 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2286 append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002287 if (err)
2288 return err;
2289
2290 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2291 if (err)
2292 goto err_fib4_node_entry_add;
2293
Ido Schimmel9aecce12017-02-09 10:28:42 +01002294 return 0;
2295
2296err_fib4_node_entry_add:
2297 mlxsw_sp_fib4_node_list_remove(fib_entry);
2298 return err;
2299}
2300
/* Inverse of mlxsw_sp_fib4_node_entry_link(): remove the entry from the
 * device (promoting a successor if needed) and from the node's list.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}
2310
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002311static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2312 struct mlxsw_sp_fib_entry *fib_entry,
2313 bool replace)
2314{
2315 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2316 struct mlxsw_sp_fib_entry *replaced;
2317
2318 if (!replace)
2319 return;
2320
2321 /* We inserted the new entry before replaced one */
2322 replaced = list_next_entry(fib_entry, list);
2323
2324 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2325 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2326 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2327}
2328
/* Handle an IPv4 route-add notification: resolve (or create) the FIB
 * node for the prefix, create the entry and link it into the node,
 * then finalize replace semantics. No-op once the router has aborted
 * offloading.
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	/* For NLM_F_REPLACE, dispose of the entry that was superseded */
	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	/* Drops the node if it ended up with no entries */
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}
2371
Jiri Pirko37956d72016-10-20 16:05:43 +02002372static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2373 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002374{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002375 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002376 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002377
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002378 if (mlxsw_sp->router.aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02002379 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002380
Ido Schimmel9aecce12017-02-09 10:28:42 +01002381 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2382 if (WARN_ON(!fib_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02002383 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002384 fib_node = fib_entry->fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02002385
Ido Schimmel9aecce12017-02-09 10:28:42 +01002386 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2387 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2388 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002389}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002390
/* Enter abort mode: bind every in-use virtual router to the minimal LPM
 * tree and install a default (/0) route that traps all traffic to the
 * CPU, so routing falls back to the kernel.
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	/* Allocate the minimal LPM tree */
	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	/* Set its single-bin structure */
	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		/* Bind the VR to the minimal tree */
		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* Install a 0.0.0.0/0 route trapping everything to CPU */
		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
2436
/* Destroy all IPv4 FIB entries linked to a FIB node. Each destroyed entry
 * drops one reference on the node, so the node itself may be freed during
 * the walk; the do_break bookkeeping guards against touching it afterwards.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		/* True when fib_entry is the last element on the list. */
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}
2456
2457static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2458 struct mlxsw_sp_fib_node *fib_node)
2459{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002460 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002461 case MLXSW_SP_L3_PROTO_IPV4:
2462 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2463 break;
2464 case MLXSW_SP_L3_PROTO_IPV6:
2465 WARN_ON_ONCE(1);
2466 break;
2467 }
2468}
2469
Ido Schimmel76610eb2017-03-10 08:53:41 +01002470static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2471 struct mlxsw_sp_vr *vr,
2472 enum mlxsw_sp_l3proto proto)
2473{
2474 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
2475 struct mlxsw_sp_fib_node *fib_node, *tmp;
2476
2477 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2478 bool do_break = &tmp->list == &fib->node_list;
2479
2480 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2481 if (do_break)
2482 break;
2483 }
2484}
2485
Ido Schimmelac571de2016-11-14 11:26:32 +01002486static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002487{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002488 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002489
Jiri Pirkoc1a38312016-10-21 16:07:23 +02002490 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002491 struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01002492
Ido Schimmel76610eb2017-03-10 08:53:41 +01002493 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002494 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002495 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002496 }
Ido Schimmelac571de2016-11-14 11:26:32 +01002497}
2498
/* Abort FIB offload: flush all offloaded routes and install a catch-all trap
 * so the kernel handles routing from now on. Idempotent - does nothing if
 * already aborted. The flush runs before the aborted flag is set so that the
 * teardown path still operates normally.
 */
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
2512
/* Deferred-work context for a FIB notification: the notifier runs in atomic
 * context, so the event payload is copied here and processed later in
 * process context by mlxsw_sp_router_fib_event_work().
 */
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		/* Payload for entry add/replace/append/del events. */
		struct fib_entry_notifier_info fen_info;
		/* Payload for nexthop add/del events. */
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;	/* FIB_EVENT_* that was received */
};
2522
/* Process-context handler for a queued FIB event. Applies the event to the
 * device under RTNL, releases the fib_info reference taken by the notifier,
 * and frees the work item. Any failure to offload a route triggers FIB abort.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		/* Drop the reference taken when the work was queued. */
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		/* Custom FIB rules are not offloaded; stop offloading
		 * entirely rather than route incorrectly.
		 */
		mlxsw_sp_router_fib4_abort(mlxsw_sp);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
2563
/* Called with rcu_read_lock() */
/* FIB notifier callback. Runs in atomic context, so it only copies the event
 * payload into a work item (taking a fib_info reference to keep it alive)
 * and defers the actual processing to mlxsw_sp_router_fib_event_work().
 */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	/* Only the init network namespace is offloaded. */
	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	/* GFP_ATOMIC: we may be called from softirq/atomic context. */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	fib_work->mlxsw_sp = mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
2605
Ido Schimmel4724ba562017-03-10 08:53:39 +01002606static struct mlxsw_sp_rif *
2607mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2608 const struct net_device *dev)
2609{
2610 int i;
2611
2612 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2613 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
2614 return mlxsw_sp->rifs[i];
2615
2616 return NULL;
2617}
2618
/* Disable a router interface in hardware via a read-modify-write of the RITR
 * register: query the current configuration, clear the enable bit, write it
 * back. Returns 0 on success or an errno from the register access.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2632
/* Synchronize router state after a RIF goes away: disable it in hardware,
 * then purge the nexthops and neighbour entries that referenced it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *r)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
}
2640
2641static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2642 const struct in_device *in_dev,
2643 unsigned long event)
2644{
2645 switch (event) {
2646 case NETDEV_UP:
2647 if (!r)
2648 return true;
2649 return false;
2650 case NETDEV_DOWN:
2651 if (r && !in_dev->ifa_list)
2652 return true;
2653 /* It is possible we already removed the RIF ourselves
2654 * if it was assigned to a netdev that is now a bridge
2655 * or LAG slave.
2656 */
2657 return false;
2658 }
2659
2660 return false;
2661}
2662
2663#define MLXSW_SP_INVALID_RIF 0xffff
2664static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2665{
2666 int i;
2667
2668 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2669 if (!mlxsw_sp->rifs[i])
2670 return i;
2671
2672 return MLXSW_SP_INVALID_RIF;
2673}
2674
2675static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2676 bool *p_lagged, u16 *p_system_port)
2677{
2678 u8 local_port = mlxsw_sp_vport->local_port;
2679
2680 *p_lagged = mlxsw_sp_vport->lagged;
2681 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2682}
2683
2684static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel69132292017-03-10 08:53:42 +01002685 u16 vr_id, struct net_device *l3_dev,
2686 u16 rif, bool create)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002687{
2688 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2689 bool lagged = mlxsw_sp_vport->lagged;
2690 char ritr_pl[MLXSW_REG_RITR_LEN];
2691 u16 system_port;
2692
Ido Schimmel69132292017-03-10 08:53:42 +01002693 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002694 l3_dev->mtu, l3_dev->dev_addr);
2695
2696 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2697 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2698 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2699
2700 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2701}
2702
2703static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2704
2705static u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
2706{
2707 return MLXSW_SP_RFID_BASE + rif;
2708}
2709
2710static struct mlxsw_sp_fid *
2711mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2712{
2713 struct mlxsw_sp_fid *f;
2714
2715 f = kzalloc(sizeof(*f), GFP_KERNEL);
2716 if (!f)
2717 return NULL;
2718
2719 f->leave = mlxsw_sp_vport_rif_sp_leave;
2720 f->ref_count = 0;
2721 f->dev = l3_dev;
2722 f->fid = fid;
2723
2724 return f;
2725}
2726
2727static struct mlxsw_sp_rif *
Ido Schimmel69132292017-03-10 08:53:42 +01002728mlxsw_sp_rif_alloc(u16 rif, u16 vr_id, struct net_device *l3_dev,
2729 struct mlxsw_sp_fid *f)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002730{
2731 struct mlxsw_sp_rif *r;
2732
2733 r = kzalloc(sizeof(*r), GFP_KERNEL);
2734 if (!r)
2735 return NULL;
2736
2737 INIT_LIST_HEAD(&r->nexthop_list);
2738 INIT_LIST_HEAD(&r->neigh_list);
2739 ether_addr_copy(r->addr, l3_dev->dev_addr);
2740 r->mtu = l3_dev->mtu;
Ido Schimmel69132292017-03-10 08:53:42 +01002741 r->vr_id = vr_id;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002742 r->dev = l3_dev;
2743 r->rif = rif;
2744 r->f = f;
2745
2746 return r;
2747}
2748
/* Create a Sub-port RIF for a vPort on the given L3 device: reserve a RIF
 * index, bind it to the main virtual router, program the interface and its
 * FDB entry in hardware, and allocate the backing FID/RIF objects.
 * On failure each step is unwound in reverse order via the goto chain.
 * Returns the new RIF or an ERR_PTR.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return ERR_PTR(-ERANGE);

	/* Sub-port RIFs always live in the main routing table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif,
				       true);
	if (err)
		goto err_vport_rif_sp_op;

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;
	vr->rif_count++;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);
err_vport_rif_sp_op:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2806
/* Destroy a Sub-port RIF: tear down in the exact reverse order of
 * mlxsw_sp_vport_rif_sp_create() - sync router state, unpublish the RIF,
 * free the objects, remove the FDB entry and hardware interface, and drop
 * the virtual router reference.
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	vr->rif_count--;
	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);

	mlxsw_sp_vr_put(vr);
}
2833
2834static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
2835 struct net_device *l3_dev)
2836{
2837 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2838 struct mlxsw_sp_rif *r;
2839
2840 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
2841 if (!r) {
2842 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
2843 if (IS_ERR(r))
2844 return PTR_ERR(r);
2845 }
2846
2847 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
2848 r->f->ref_count++;
2849
2850 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
2851
2852 return 0;
2853}
2854
2855static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
2856{
2857 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
2858
2859 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
2860
2861 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
2862 if (--f->ref_count == 0)
2863 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
2864}
2865
2866static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
2867 struct net_device *port_dev,
2868 unsigned long event, u16 vid)
2869{
2870 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
2871 struct mlxsw_sp_port *mlxsw_sp_vport;
2872
2873 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2874 if (WARN_ON(!mlxsw_sp_vport))
2875 return -EINVAL;
2876
2877 switch (event) {
2878 case NETDEV_UP:
2879 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
2880 case NETDEV_DOWN:
2881 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
2882 break;
2883 }
2884
2885 return 0;
2886}
2887
2888static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2889 unsigned long event)
2890{
2891 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2892 return 0;
2893
2894 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2895}
2896
2897static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
2898 struct net_device *lag_dev,
2899 unsigned long event, u16 vid)
2900{
2901 struct net_device *port_dev;
2902 struct list_head *iter;
2903 int err;
2904
2905 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
2906 if (mlxsw_sp_port_dev_check(port_dev)) {
2907 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
2908 event, vid);
2909 if (err)
2910 return err;
2911 }
2912 }
2913
2914 return 0;
2915}
2916
/* Inetaddr event on a LAG master. A LAG that is itself a bridge port is
 * routed via the bridge, so it is ignored here; otherwise the event applies
 * to the LAG's default (VID 1) vPorts.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	return netif_is_bridge_port(lag_dev) ? 0 :
	       __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
2925
2926static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2927 struct net_device *l3_dev)
2928{
2929 u16 fid;
2930
2931 if (is_vlan_dev(l3_dev))
2932 fid = vlan_dev_vlan_id(l3_dev);
2933 else if (mlxsw_sp->master_bridge.dev == l3_dev)
2934 fid = 1;
2935 else
2936 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2937
2938 return mlxsw_sp_fid_find(mlxsw_sp, fid);
2939}
2940
2941static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
2942{
2943 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
2944 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2945}
2946
2947static u16 mlxsw_sp_flood_table_index_get(u16 fid)
2948{
2949 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
2950}
2951
/* Add or remove (per 'set') the router port in the broadcast flood table of
 * the given FID, so broadcast traffic on the FID reaches the router.
 * Returns 0 on success or a negative errno (-ENOMEM or register failure).
 */
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	/* SFTR is too large for the stack; allocate it. */
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
			    1, MLXSW_PORT_ROUTER_PORT, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}
2973
2974static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2975{
2976 if (mlxsw_sp_fid_is_vfid(fid))
2977 return MLXSW_REG_RITR_FID_IF;
2978 else
2979 return MLXSW_REG_RITR_VLAN_IF;
2980}
2981
/* Create or destroy (per 'create') a bridge RIF in hardware via the RITR
 * register, bound to virtual router 'vr_id' with the L3 device's MTU/MAC and
 * the interface type derived from the FID kind. Returns 0 or an errno.
 */
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2997
/* Create a bridge RIF for the given L3 device and FID: reserve a RIF index,
 * bind the main virtual router, enable router-port flooding, program the
 * interface and its FDB entry, and allocate the backing RIF object.
 * Each step is unwound in reverse order on failure. Returns 0 or an errno.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return -ERANGE;

	/* Bridge RIFs always live in the main routing table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		goto err_port_flood_set;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif,
				     true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;
	vr->rif_count++;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
	mlxsw_sp_vr_put(vr);
	return err;
}
3052
/* Destroy a bridge RIF: tear down in the exact reverse order of
 * mlxsw_sp_rif_bridge_create() - sync router state, unpublish the RIF, free
 * it, remove the FDB entry and hardware interface, disable flooding, and
 * drop the virtual router reference.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	vr->rif_count--;
	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	mlxsw_sp_vr_put(vr);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
3079
3080static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3081 struct net_device *br_dev,
3082 unsigned long event)
3083{
3084 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3085 struct mlxsw_sp_fid *f;
3086
3087 /* FID can either be an actual FID if the L3 device is the
3088 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3089 * L3 device is a VLAN-unaware bridge and we get a vFID.
3090 */
3091 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3092 if (WARN_ON(!f))
3093 return -EINVAL;
3094
3095 switch (event) {
3096 case NETDEV_UP:
3097 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3098 case NETDEV_DOWN:
3099 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3100 break;
3101 }
3102
3103 return 0;
3104}
3105
/* Inetaddr event on a VLAN device: dispatch according to the real device
 * underneath - a switch port, a LAG master, or the VLAN-aware master bridge.
 * Other real devices are ignored. Returns 0 or a negative errno.
 */
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}
3126
/* inetaddr notifier entry point: decide whether the IPv4 address change
 * requires configuring or removing a RIF for the netdev, and dispatch by
 * device type. Returns a notifier value encoding any errno.
 */
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	/* Device not upper of a mlxsw port - nothing to do. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, ifa->ifa_dev, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}
3156
/* Update a RIF's MAC address and MTU in hardware via a read-modify-write of
 * the RITR register. Returns 0 on success or an errno from register access.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	/* Re-issue as a create so the updated fields take effect. */
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
3173
/* React to a MAC/MTU change on a netdev with a RIF: remove the old FDB
 * entry, reprogram the RIF, install the new FDB entry, then update the
 * cached values. On failure, earlier steps are rolled back so the old
 * configuration is restored. Returns 0 or a negative errno.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
3213
/* Callback passed to register_fib_notifier(): clear stale state before the
 * initial FIB dump is replayed to us.
 */
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
}
3225
/* Low-level router init: allocate the RIF pointer array sized from device
 * resources and enable the router in hardware (RGCR). Returns 0 or a
 * negative errno; the array is freed if enabling fails.
 */
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}
3253
/* Low-level router teardown: disable the router in hardware and free the
 * RIF array, warning if any RIF is still allocated (a leak elsewhere).
 */
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int i;

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);

	kfree(mlxsw_sp->rifs);
}
3267
/* Initialize the router subsystem: hardware router, nexthop hash tables,
 * LPM trees, virtual routers, neighbour handling, and finally the FIB
 * notifier (which replays the current FIB to us). Each step is unwound in
 * reverse order on failure. Returns 0 or a negative errno.
 */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	mlxsw_sp_lpm_init(mlxsw_sp);
	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	/* Registered last so all supporting structures exist before events
	 * start flowing in.
	 */
	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
err_nexthop_ht_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}
3316
/* Tear down the router subsystem in the exact reverse order of
 * mlxsw_sp_router_init(), starting with the FIB notifier so no new events
 * arrive while structures are being destroyed.
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
}