blob: 5aad0aef1ed3a2c2b56c7270006e3a60b0a846a0 [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020039#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020042#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010043#include <linux/inetdevice.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020044#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020045#include <net/neighbour.h>
46#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020047#include <net/ip_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010048#include <net/fib_rules.h>
Ido Schimmel57837882017-03-16 09:08:16 +010049#include <net/l3mdev.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020050
51#include "spectrum.h"
52#include "core.h"
53#include "reg.h"
54
/* Router interface (RIF) — the L3 representation of a netdev in the
 * Spectrum ASIC. Looked up by netdev via mlxsw_sp_rif_find_by_dev().
 */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops egressing via this RIF */
	struct list_head neigh_list;	/* neigh entries created on this RIF */
	struct net_device *dev;		/* backing kernel netdev */
	struct mlxsw_sp_fid *f;		/* NOTE(review): FID bound to the RIF — confirm */
	unsigned char addr[ETH_ALEN];	/* RIF MAC address */
	int mtu;
	u16 rif;			/* hardware RIF index (index into mlxsw_sp->rifs) */
	u16 vr_id;			/* virtual router the RIF is member of */
};
65
66static struct mlxsw_sp_rif *
67mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
68 const struct net_device *dev);
69
/* Iterate over every prefix length whose bit is set in @prefix_usage,
 * in ascending order.
 */
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
72
73static bool
Jiri Pirko6b75c482016-07-04 08:23:09 +020074mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
75 struct mlxsw_sp_prefix_usage *prefix_usage2)
76{
77 unsigned char prefix;
78
79 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
80 if (!test_bit(prefix, prefix_usage2->b))
81 return false;
82 }
83 return true;
84}
85
86static bool
Jiri Pirko53342022016-07-04 08:23:08 +020087mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
88 struct mlxsw_sp_prefix_usage *prefix_usage2)
89{
90 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
91}
92
Jiri Pirko6b75c482016-07-04 08:23:09 +020093static bool
94mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
95{
96 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
97
98 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
99}
100
101static void
102mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
103 struct mlxsw_sp_prefix_usage *prefix_usage2)
104{
105 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
106}
107
108static void
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200109mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
110 unsigned char prefix_len)
111{
112 set_bit(prefix_len, prefix_usage->b);
113}
114
115static void
116mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
117 unsigned char prefix_len)
118{
119 clear_bit(prefix_len, prefix_usage->b);
120}
121
/* FIB node hash key: destination address (sized for IPv6 so the same
 * key fits both protocols) plus prefix length.
 */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};
126
/* How a FIB entry is programmed in hardware. */
enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,	/* forwarded via nexthop group */
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,	/* NOTE(review): locally routed — confirm */
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,	/* trapped to the CPU */
};
132
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200133struct mlxsw_sp_nexthop_group;
134
/* A single prefix in a FIB. Holds the list of routes (entries) sharing
 * that prefix.
 */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* mlxsw_sp_fib_entry::list members */
	struct list_head list;		/* member of mlxsw_sp_fib::node_list */
	struct rhash_head ht_node;	/* member of mlxsw_sp_fib::ht */
	struct mlxsw_sp_fib *fib;	/* owning FIB */
	struct mlxsw_sp_fib_key key;	/* prefix + length, the hash key */
};
142
/* Route parameters taken from the kernel FIB notification; used to
 * match/order entries under the same node.
 */
struct mlxsw_sp_fib_entry_params {
	u32 tb_id;	/* kernel routing table ID */
	u32 prio;
	u8 tos;
	u8 type;
};
149
/* One route under a FIB node. */
struct mlxsw_sp_fib_entry {
	struct list_head list;			/* member of fib_node::entry_list */
	struct mlxsw_sp_fib_node *fib_node;	/* owning prefix node */
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;	/* linkage in nh_group's entry list */
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;				/* currently programmed in HW */
};
159
/* Per-protocol FIB of a virtual router. Tracks how many nodes use each
 * prefix length so the bound LPM tree can be (re)shaped to match.
 */
struct mlxsw_sp_fib {
	struct rhashtable ht;		/* nodes hashed by mlxsw_sp_fib_key */
	struct list_head node_list;	/* all nodes, for iteration/flush */
	struct mlxsw_sp_vr *vr;		/* owning virtual router */
	struct mlxsw_sp_lpm_tree *lpm_tree;	/* HW tree bound to this FIB */
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; /* nodes per prefix len */
	struct mlxsw_sp_prefix_usage prefix_usage;	/* bitmap of used lengths */
	enum mlxsw_sp_l3proto proto;
};
169
Ido Schimmel9aecce12017-02-09 10:28:42 +0100170static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200171
/* Allocate a FIB for virtual router @vr and protocol @proto.
 *
 * The node hash table and node list start empty; lpm_tree stays NULL
 * until a tree is bound. Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}
193
/* Free a FIB. All nodes must already be gone and the LPM tree unbound;
 * the WARNs catch teardown-ordering bugs.
 */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
201
Jiri Pirko53342022016-07-04 08:23:08 +0200202static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100203mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200204{
205 static struct mlxsw_sp_lpm_tree *lpm_tree;
206 int i;
207
208 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
209 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100210 if (lpm_tree->ref_count == 0)
211 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200212 }
213 return NULL;
214}
215
/* Allocate tree @lpm_tree->id in the device via the RALTA register. */
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
226
/* Release tree @lpm_tree->id in the device (RALTA with alloc=false). */
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
237
/* Program the tree structure (RALST register) from the set of used
 * prefix lengths. The highest used length becomes the root bin and the
 * remaining used lengths (except 0) are chained, each pointing to the
 * previously packed bin as its left child.
 * NOTE(review): exact bin semantics follow the RALST register layout —
 * see the register definitions in reg.h.
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	/* Iteration is ascending, so root_bin ends up as the highest
	 * used prefix length.
	 */
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
261
/* Claim an unused tree slot, allocate it in hardware and program its
 * structure from @prefix_usage. On success the slot records the usage
 * it was built for; ref-counting is done by the caller
 * (mlxsw_sp_lpm_tree_get()). Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
290
/* Destroying a tree only requires releasing it in hardware; the slot
 * is reused once its ref_count has dropped to zero.
 */
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
296
/* Get a tree matching @proto and exactly @prefix_usage, reusing an
 * in-use tree when one matches, otherwise creating a new one. Takes a
 * reference in either case; pair with mlxsw_sp_lpm_tree_put().
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
		lpm_tree = &mlxsw_sp->router.lpm_trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}
322
323static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
324 struct mlxsw_sp_lpm_tree *lpm_tree)
325{
326 if (--lpm_tree->ref_count == 0)
327 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
328 return 0;
329}
330
331static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
332{
333 struct mlxsw_sp_lpm_tree *lpm_tree;
334 int i;
335
336 for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
337 lpm_tree = &mlxsw_sp->router.lpm_trees[i];
338 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
339 }
340}
341
Ido Schimmel76610eb2017-03-10 08:53:41 +0100342static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
343{
344 return !!vr->fib4;
345}
346
/* Return the first virtual router with no FIB attached, or NULL if
 * all MAX_VRS routers are in use.
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}
359
/* Bind @fib's virtual router to @fib->lpm_tree in hardware (RALTB). */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
370
/* Unbind @fib's virtual router from its LPM tree by re-binding it to
 * tree 0, the hardware default.
 */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
381
382static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
383{
384 /* For our purpose, squash main and local table into one */
385 if (tb_id == RT_TABLE_LOCAL)
386 tb_id = RT_TABLE_MAIN;
387 return tb_id;
388}
389
/* Find the in-use virtual router bound to kernel table @tb_id (local
 * table is folded into main first). Returns NULL if none exists.
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router.vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}
405
/* Return the FIB of @vr for @proto. Only IPv4 is supported here;
 * reaching the IPv6 arm is a driver bug, hence BUG_ON.
 */
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}
417
/* Claim an unused virtual router and create its IPv4 FIB for kernel
 * table @tb_id. Returns ERR_PTR(-EBUSY) when all routers are taken.
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->tb_id = tb_id;
	return vr;
}
432
/* Tear down a virtual router; clearing fib4 marks it unused again
 * (see mlxsw_sp_vr_is_used()).
 */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
438
/* Make sure @fib is bound to an LPM tree covering @req_prefix_usage.
 *
 * If the current tree already matches exactly, nothing to do. Otherwise
 * try to get a matching tree; if none can be had, the current tree is
 * still acceptable when the requirement is a subset of what it covers.
 * On success the new tree is bound before the old one is released so
 * the router is never left without a tree (avoids packet loss); on bind
 * failure the old tree pointer is restored.
 */
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
478
Ido Schimmel76610eb2017-03-10 08:53:41 +0100479static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200480{
481 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200482
483 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100484 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
485 if (!vr)
486 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200487 return vr;
488}
489
/* Release a virtual router: destroy it once no RIF is a member of it
 * and its FIB holds no nodes.
 */
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}
495
/* Allocate the virtual-router array, sized by the device's MAX_VRS
 * resource, and assign each router its index. Returns -EIO when the
 * resource is not exposed by the device.
 */
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
				       GFP_KERNEL);
	if (!mlxsw_sp->router.vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		vr->id = i;
	}

	return 0;
}
518
Ido Schimmelac571de2016-11-14 11:26:32 +0100519static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
520
/* Counterpart of mlxsw_sp_vrs_init(): flush pending FIB work, flush
 * the device tables and free the router array.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router.vrs);
}
534
/* Neighbour hash key: the kernel neighbour itself. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};
538
/* Driver-side state for one kernel neighbour. */
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* member of the RIF's neigh_list */
	struct rhash_head ht_node;	/* member of router.neigh_ht */
	struct mlxsw_sp_neigh_key key;	/* kernel neighbour, the hash key */
	u16 rif;			/* hardware RIF index of n->dev */
	bool connected;			/* entry currently programmed in HW */
	unsigned char ha[ETH_ALEN];	/* cached hardware (MAC) address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node; /* member of
						    * router.nexthop_neighs_list
						    */
};
551
/* rhashtable layout for neigh entries: keyed by the neighbour pointer. */
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
557
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100558static struct mlxsw_sp_neigh_entry *
559mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
560 u16 rif)
561{
562 struct mlxsw_sp_neigh_entry *neigh_entry;
563
564 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
565 if (!neigh_entry)
566 return NULL;
567
568 neigh_entry->key.n = n;
569 neigh_entry->rif = rif;
570 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
571
572 return neigh_entry;
573}
574
/* Free a neigh entry previously allocated by
 * mlxsw_sp_neigh_entry_alloc().
 */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
579
/* Insert @neigh_entry into the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
588
/* Remove @neigh_entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
597
/* Create a neigh entry for kernel neighbour @n: resolve its netdev to
 * a RIF, allocate the entry, hash it and link it on the RIF's
 * neighbour list. Returns ERR_PTR(-EINVAL) when the netdev has no RIF.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *r;
	int err;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!r)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &r->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
625
/* Undo mlxsw_sp_neigh_entry_create(): unlink from the RIF list,
 * remove from the hash table and free.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
634
635static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +0100636mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200637{
Jiri Pirko33b13412016-11-10 12:31:04 +0100638 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200639
Jiri Pirko33b13412016-11-10 12:31:04 +0100640 key.n = n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +0200641 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
642 &key, mlxsw_sp_neigh_ht_params);
643}
644
/* Seed the neighbour activity-dump interval from the ARP table's
 * DELAY_PROBE_TIME setting (stored in milliseconds).
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
}
652
/* Handle one IPv4 entry from a RAUHTD activity dump: look up the
 * matching kernel neighbour and mark it alive via neigh_event_send()
 * so the kernel does not age out entries the hardware is still using.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);	/* drop the ref taken by neigh_lookup() */
}
683
684static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
685 char *rauhtd_pl,
686 int rec_index)
687{
688 u8 num_entries;
689 int i;
690
691 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
692 rec_index);
693 /* Hardware starts counting at 0, so add 1. */
694 num_entries++;
695
696 /* Each record consists of several neighbour entries. */
697 for (i = 0; i < num_entries; i++) {
698 int ent_index;
699
700 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
701 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
702 ent_index);
703 }
704
705}
706
/* Dispatch one RAUHTD record by type. Only IPv4 records are expected;
 * an IPv6 record indicates a driver/firmware inconsistency.
 */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}
720
/* Heuristic for whether a RAUHTD response filled the whole buffer,
 * i.e. another query round is needed. True when the maximum number of
 * records came back and the last record is itself full (an IPv6 record
 * counts as full; an IPv4 record is full when it carries the maximum
 * number of entries — the register reports the count zero-based, hence
 * the pre-increment).
 */
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}
740
Yotam Gigib2157142016-07-05 11:27:51 +0200741static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
Yotam Gigic723c7352016-07-05 11:27:43 +0200742{
Yotam Gigic723c7352016-07-05 11:27:43 +0200743 char *rauhtd_pl;
744 u8 num_rec;
745 int i, err;
746
747 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
748 if (!rauhtd_pl)
Yotam Gigib2157142016-07-05 11:27:51 +0200749 return -ENOMEM;
Yotam Gigic723c7352016-07-05 11:27:43 +0200750
751 /* Make sure the neighbour's netdev isn't removed in the
752 * process.
753 */
754 rtnl_lock();
755 do {
756 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
757 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
758 rauhtd_pl);
759 if (err) {
760 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
761 break;
762 }
763 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
764 for (i = 0; i < num_rec; i++)
765 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
766 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100767 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +0200768 rtnl_unlock();
769
770 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +0200771 return err;
772}
773
/* Keep neighbours that back nexthops alive: report activity for each
 * of them regardless of actual traffic.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
788
789static void
790mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
791{
792 unsigned long interval = mlxsw_sp->router.neighs_update.interval;
793
794 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
795 msecs_to_jiffies(interval));
796}
797
/* Periodic work: dump hardware neighbour activity, refresh nexthop
 * neighbours, then reschedule itself.
 */
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.neighs_update.dw.work);
	int err;

	err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
}
812
/* Periodic work: trigger resolution of nexthop neighbours that are not
 * yet connected, then reschedule itself.
 */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
						 router.nexthop_probe_dw.work);

	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
837
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200838static void
839mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
840 struct mlxsw_sp_neigh_entry *neigh_entry,
841 bool removing);
842
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100843static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200844{
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100845 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
846 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
847}
848
849static void
850mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
851 struct mlxsw_sp_neigh_entry *neigh_entry,
852 enum mlxsw_reg_rauht_op op)
853{
Jiri Pirko33b13412016-11-10 12:31:04 +0100854 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100855 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +0200856 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100857
858 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
859 dip);
860 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
861}
862
863static void
864mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
865 struct mlxsw_sp_neigh_entry *neigh_entry,
866 bool adding)
867{
868 if (!adding && !neigh_entry->connected)
869 return;
870 neigh_entry->connected = adding;
871 if (neigh_entry->key.n->tbl == &arp_tbl)
872 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
873 mlxsw_sp_rauht_op(adding));
874 else
875 WARN_ON_ONCE(1);
876}
877
/* Context passed from the atomic netevent notifier to the sleepable
 * work item that actually updates the device.
 */
struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n; /* reference held until the work item runs */
};
883
/* Deferred handler for NETEVENT_NEIGH_UPDATE: sample the neighbour's
 * state under its lock, then create/update/destroy the corresponding
 * device entry under RTNL.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* Nothing to do if the neighbour is gone and we never tracked it. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* Keep the entry around while nexthops still reference it. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	/* Drop the reference taken by the notifier via neigh_clone(). */
	neigh_release(n);
	kfree(neigh_work);
}
928
/* netevent notifier: runs in atomic context, so device updates for
 * neighbour changes are deferred to a work item; only the polling
 * interval update is handled inline.
 */
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router.neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		/* Only IPv4 (ARP) neighbours are offloaded. */
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		/* GFP_ATOMIC: allocation happens in notifier context. */
		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}
992
/* Set up neighbour tracking: the neighbour hash table, the polling
 * interval and the two periodic delayed works (activity dump and
 * unresolved-nexthop probing).
 */
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	/* Kick both works immediately; they re-arm themselves. */
	mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
	return 0;
}
1016
/* Tear down neighbour tracking: stop the periodic works (waiting for any
 * in-flight run) and destroy the neighbour hash table.
 */
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
1023
Ido Schimmel9665b742017-02-08 11:16:42 +01001024static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
1025 const struct mlxsw_sp_rif *r)
1026{
1027 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1028
1029 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
1030 r->rif, r->addr);
1031 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1032}
1033
/* A router interface is going away: flush all its neighbours from the
 * device and destroy the corresponding driver entries.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
	/* _safe variant: destroy unlinks entries from r->neigh_list. */
	list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
1044
/* Hash table key for a nexthop: the kernel FIB nexthop it mirrors. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};
1048
/* Driver representation of a single nexthop within a nexthop group. */
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node; /* member of the RIF's nexthop list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node; /* member of router.nexthop_ht */
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *r; /* egress router interface, if resolved */
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};
1069
/* Hash table key for a nexthop group: the kernel fib_info it mirrors. */
struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};
1073
/* Driver representation of a kernel nexthop group (ECMP or single). */
struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node; /* member of router.nexthop_group_ht */
	struct list_head fib_list; /* list of fib entries that use this group */
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1, /* adj_index refers to a live KVD allocation */
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index; /* base index into the KVD linear adjacency area */
	u16 ecmp_size; /* number of adjacency entries allocated */
	u16 count; /* number of elements in nexthops[] */
	struct mlxsw_sp_nexthop nexthops[0]; /* flexible trailing array */
/* Convenience alias: RIF of the first nexthop. */
#define nh_rif nexthops[0].r
};
1086
/* rhashtable layout for nexthop groups, keyed by the kernel fib_info. */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};
1092
/* Add a nexthop group to the router's group hash table. */
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}
1100
/* Remove a nexthop group from the router's group hash table. */
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}
1108
/* Find the nexthop group mirroring the given fib_info, or NULL. */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}
1116
/* rhashtable layout for individual nexthops, keyed by the kernel fib_nh. */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};
1122
/* Add a nexthop to the router's nexthop hash table. */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
1129
/* Remove a nexthop from the router's nexthop hash table. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}
1136
/* Find the nexthop mirroring the given fib_nh, or NULL. */
static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
1144
/* Re-point all routes of one FIB (virtual router) from the old adjacency
 * block to the new one in a single RALEU register operation.
 */
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
1159
/* After a group's adjacency block moved, update every FIB that contains
 * routes using the group so they point at the new block.
 */
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		/* Skip consecutive entries belonging to the same FIB; it was
		 * just updated.
		 */
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}
1182
/* Write one adjacency entry (egress RIF + neighbour MAC) at adj_index
 * via the RATR register.
 */
static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
1194
/* Walk the group's nexthops and program the adjacency entries of those
 * that should be offloaded. When @reallocate is set (fresh adjacency
 * block), every offloadable entry is written regardless of its update
 * flag; otherwise only flagged entries are rewritten.
 */
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		/* Non-offloadable nexthops consume no adjacency entry. */
		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}
1225
1226static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1227 struct mlxsw_sp_fib_entry *fib_entry);
1228
/* Re-program every FIB entry that uses this nexthop group; stops at the
 * first failure.
 */
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}
1243
/* Reconcile a nexthop group's state with the device. Depending on which
 * nexthops are currently resolved this either updates MACs in place,
 * allocates a new adjacency block and migrates routes to it, or falls
 * back to trapping the group's traffic to the CPU.
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int ret;
	int i;
	int err;

	/* Gatewayless groups use no adjacency entries; just re-program
	 * their FIB entries.
	 */
	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	/* Count offloadable nexthops and detect membership changes. */
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everthing flow through kernel.
		 */
		goto set_trap;

	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
	if (ret < 0) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	adj_index = ret;
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	/* Fresh block: write every offloadable entry. */
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	/* Atomically migrate routes from the old block, then free it. */
	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
1348
1349static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1350 bool removing)
1351{
1352 if (!removing && !nh->should_offload)
1353 nh->should_offload = 1;
1354 else if (removing && nh->offloaded)
1355 nh->should_offload = 0;
1356 nh->update = 1;
1357}
1358
/* A neighbour changed state: propagate to every nexthop using it and
 * refresh each affected group.
 */
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1372
/* Bind a nexthop to a router interface; idempotent if already bound. */
static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *r)
{
	if (nh->r)
		return;

	nh->r = r;
	list_add(&nh->rif_list_node, &r->nexthop_list);
}
1382
/* Unbind a nexthop from its router interface; no-op if not bound. */
static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->r)
		return;

	list_del(&nh->rif_list_node);
	nh->r = NULL;
}
1391
/* Resolve (or create) the neighbour behind a gateway nexthop and link
 * the nexthop to it. No-op for gatewayless groups or already-linked
 * nexthops.
 */
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct fib_nh *fib_nh = nh->key.fib_nh;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be detructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution of the freshly created neighbour. */
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			/* NOTE(review): discards the specific error from
			 * mlxsw_sp_neigh_entry_create(); PTR_ERR() would
			 * preserve it — confirm intent.
			 */
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router.nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}
1446
/* Unlink a nexthop from its neighbour entry, destroying the entry and
 * dropping the neighbour reference when no users remain.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	/* Drop the reference taken in mlxsw_sp_nexthop_neigh_init(). */
	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001472
Ido Schimmela8c97012017-02-08 11:16:35 +01001473static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1474 struct mlxsw_sp_nexthop_group *nh_grp,
1475 struct mlxsw_sp_nexthop *nh,
1476 struct fib_nh *fib_nh)
1477{
1478 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001479 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01001480 struct mlxsw_sp_rif *r;
1481 int err;
1482
1483 nh->nh_grp = nh_grp;
1484 nh->key.fib_nh = fib_nh;
1485 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1486 if (err)
1487 return err;
1488
Ido Schimmel97989ee2017-03-10 08:53:38 +01001489 if (!dev)
1490 return 0;
1491
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001492 in_dev = __in_dev_get_rtnl(dev);
1493 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1494 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1495 return 0;
1496
Ido Schimmela8c97012017-02-08 11:16:35 +01001497 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1498 if (!r)
1499 return 0;
Ido Schimmel9665b742017-02-08 11:16:42 +01001500 mlxsw_sp_nexthop_rif_init(nh, r);
Ido Schimmela8c97012017-02-08 11:16:35 +01001501
1502 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1503 if (err)
1504 goto err_nexthop_neigh_init;
1505
1506 return 0;
1507
1508err_nexthop_neigh_init:
1509 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1510 return err;
1511}
1512
/* Tear down one nexthop: neighbour link, RIF binding, hash table node. */
static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
1520
/* Handle FIB_EVENT_NH_ADD/DEL for a single nexthop: (un)bind it to its
 * RIF and neighbour and refresh the owning group.
 */
static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *r;

	/* After an abort, routing is no longer offloaded. */
	if (mlxsw_sp->router.aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!r)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, r);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
1553
/* A router interface is going away: detach every nexthop bound to it
 * and refresh the affected groups (they will fall back to trapping).
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	/* _safe variant: rif_fini unlinks nh from r->nexthop_list. */
	list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1565
/* Create the driver mirror of a kernel fib_info: allocate the group with
 * its trailing nexthop array, initialize each nexthop, insert the group
 * into the hash table and program it to the device.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	/* Group struct plus one trailing element per kernel nexthop. */
	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	/* Unwind only the nexthops that were successfully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
1607
/* Destroy a nexthop group: unhash it, tear down its nexthops and let a
 * final refresh release the adjacency block before freeing.
 */
static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	/* The refresh above must have freed the adjacency allocation. */
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
1624
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001625static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1626 struct mlxsw_sp_fib_entry *fib_entry,
1627 struct fib_info *fi)
1628{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001629 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001630 struct mlxsw_sp_nexthop_group *nh_grp;
1631
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001632 key.fi = fi;
1633 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001634 if (!nh_grp) {
1635 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1636 if (IS_ERR(nh_grp))
1637 return PTR_ERR(nh_grp);
1638 }
1639 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1640 fib_entry->nh_group = nh_grp;
1641 return 0;
1642}
1643
1644static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1645 struct mlxsw_sp_fib_entry *fib_entry)
1646{
1647 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1648
1649 list_del(&fib_entry->nexthop_group_node);
1650 if (!list_empty(&nh_grp->fib_list))
1651 return;
1652 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1653}
1654
Ido Schimmel013b20f2017-02-08 11:16:36 +01001655static bool
1656mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1657{
1658 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1659
Ido Schimmel9aecce12017-02-09 10:28:42 +01001660 if (fib_entry->params.tos)
1661 return false;
1662
Ido Schimmel013b20f2017-02-08 11:16:36 +01001663 switch (fib_entry->type) {
1664 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1665 return !!nh_group->adj_index_valid;
1666 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001667 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001668 default:
1669 return false;
1670 }
1671}
1672
/* Mark the entry as offloaded and bump the offload counter of the
 * kernel FIB info it is bound to.
 */
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 offload is not implemented. */
		WARN_ON_ONCE(1);
	}
}
1685
/* Clear the entry's offload indication and drop the offload counter of
 * the kernel FIB info it is bound to.
 */
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_dec(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 offload is not implemented. */
		WARN_ON_ONCE(1);
	}

	fib_entry->offloaded = false;
}
1699
/* Keep the entry's offload indication in sync with the outcome of the
 * last RALUE operation performed for it.
 */
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		/* A delete always clears the indication, regardless of
		 * @err, but only if it was previously set.
		 */
		if (!fib_entry->offloaded)
			return;
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		/* Toggle the indication only on an actual change of the
		 * should-offload state.
		 */
		if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
		    !fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
			 fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
1723
/* Program a remote (nexthop-based) IPv4 route via the RALUE register.
 * When the nexthop group holds a valid adjacency index, packets are
 * forwarded through it; otherwise they are trapped to the CPU.
 */
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1757
/* Program a local IPv4 route via the RALUE register. When a RIF is
 * associated with the nexthop group, packets are routed through it;
 * otherwise they are trapped to the CPU.
 */
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif = r->rif;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1785
1786static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
1787 struct mlxsw_sp_fib_entry *fib_entry,
1788 enum mlxsw_reg_ralue_op op)
1789{
Ido Schimmel76610eb2017-03-10 08:53:41 +01001790 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001791 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel9aecce12017-02-09 10:28:42 +01001792 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001793
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001794 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001795 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
1796 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01001797 *p_dip);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001798 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
1799 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1800}
1801
1802static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
1803 struct mlxsw_sp_fib_entry *fib_entry,
1804 enum mlxsw_reg_ralue_op op)
1805{
1806 switch (fib_entry->type) {
1807 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001808 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001809 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
1810 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
1811 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
1812 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
1813 }
1814 return -EINVAL;
1815}
1816
1817static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
1818 struct mlxsw_sp_fib_entry *fib_entry,
1819 enum mlxsw_reg_ralue_op op)
1820{
Ido Schimmel013b20f2017-02-08 11:16:36 +01001821 int err = -EINVAL;
1822
Ido Schimmel76610eb2017-03-10 08:53:41 +01001823 switch (fib_entry->fib_node->fib->proto) {
Jiri Pirko61c503f2016-07-04 08:23:11 +02001824 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel013b20f2017-02-08 11:16:36 +01001825 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
1826 break;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001827 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel013b20f2017-02-08 11:16:36 +01001828 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001829 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01001830 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
1831 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001832}
1833
1834static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1835 struct mlxsw_sp_fib_entry *fib_entry)
1836{
Jiri Pirko7146da32016-09-01 10:37:41 +02001837 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1838 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02001839}
1840
1841static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
1842 struct mlxsw_sp_fib_entry *fib_entry)
1843{
1844 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
1845 MLXSW_REG_RALUE_OP_WRITE_DELETE);
1846}
1847
/* Derive the device action type for an IPv4 route from the route type
 * reported in the FIB notification. Returns -EINVAL for route types
 * the driver cannot handle.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		/* Host-directed traffic must always reach the CPU. */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		/* Only use the remote (adjacency) action when the
		 * nexthop is reachable on-link (RT_SCOPE_LINK).
		 */
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
1879
Jiri Pirko5b004412016-09-01 10:37:40 +02001880static struct mlxsw_sp_fib_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01001881mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
1882 struct mlxsw_sp_fib_node *fib_node,
1883 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02001884{
1885 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001886 int err;
1887
1888 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
1889 if (!fib_entry) {
1890 err = -ENOMEM;
1891 goto err_fib_entry_alloc;
1892 }
1893
1894 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
1895 if (err)
1896 goto err_fib4_entry_type_set;
1897
1898 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
1899 if (err)
1900 goto err_nexthop_group_get;
1901
1902 fib_entry->params.prio = fen_info->fi->fib_priority;
1903 fib_entry->params.tb_id = fen_info->tb_id;
1904 fib_entry->params.type = fen_info->type;
1905 fib_entry->params.tos = fen_info->tos;
1906
1907 fib_entry->fib_node = fib_node;
1908
1909 return fib_entry;
1910
1911err_nexthop_group_get:
1912err_fib4_entry_type_set:
1913 kfree(fib_entry);
1914err_fib_entry_alloc:
1915 return ERR_PTR(err);
1916}
1917
/* Free a FIB entry, dropping its nexthop group reference first. */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}
1924
1925static struct mlxsw_sp_fib_node *
1926mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
1927 const struct fib_entry_notifier_info *fen_info);
1928
/* Find an existing FIB entry matching the notification parameters.
 * Returns NULL when no such entry exists.
 *
 * NOTE(review): mlxsw_sp_fib4_node_get() creates the FIB node when it
 * does not exist yet; on a lookup miss such a freshly created node is
 * never released here — confirm callers only look up prefixes that are
 * already present.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node))
		return NULL;

	/* An entry matches on table ID, TOS, route type and FIB info. */
	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id == fen_info->tb_id &&
		    fib_entry->params.tos == fen_info->tos &&
		    fib_entry->params.type == fen_info->type &&
		    fib_entry->nh_group->key.fi == fen_info->fi) {
			return fib_entry;
		}
	}

	return NULL;
}
1951
/* rhashtable keying FIB nodes by { destination address, prefix length }. */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
1958
/* Add the node to the FIB's prefix hash table. */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
1965
/* Remove the node from the FIB's prefix hash table. */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
1972
/* Look up a FIB node by destination address and prefix length. The key
 * is zeroed first so that address bytes beyond @addr_len compare equal
 * in the hash table's key comparison.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
1984
1985static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01001986mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01001987 size_t addr_len, unsigned char prefix_len)
1988{
1989 struct mlxsw_sp_fib_node *fib_node;
1990
1991 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
1992 if (!fib_node)
1993 return NULL;
1994
1995 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01001996 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01001997 memcpy(fib_node->key.addr, addr, addr_len);
1998 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001999
2000 return fib_node;
2001}
2002
/* Unlink the node from its FIB's node list and free it. The entry list
 * must already be empty.
 */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
2009
2010static bool
2011mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2012 const struct mlxsw_sp_fib_entry *fib_entry)
2013{
2014 return list_first_entry(&fib_node->entry_list,
2015 struct mlxsw_sp_fib_entry, list) == fib_entry;
2016}
2017
2018static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2019{
2020 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002021 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002022
2023 if (fib->prefix_ref_count[prefix_len]++ == 0)
2024 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2025}
2026
2027static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2028{
2029 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002030 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002031
2032 if (--fib->prefix_ref_count[prefix_len] == 0)
2033 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2034}
2035
Ido Schimmel76610eb2017-03-10 08:53:41 +01002036static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2037 struct mlxsw_sp_fib_node *fib_node,
2038 struct mlxsw_sp_fib *fib)
2039{
2040 struct mlxsw_sp_prefix_usage req_prefix_usage;
2041 struct mlxsw_sp_lpm_tree *lpm_tree;
2042 int err;
2043
2044 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2045 if (err)
2046 return err;
2047 fib_node->fib = fib;
2048
2049 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2050 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2051
2052 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2053 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2054 &req_prefix_usage);
2055 if (err)
2056 goto err_tree_check;
2057 } else {
2058 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2059 fib->proto);
2060 if (IS_ERR(lpm_tree))
2061 return PTR_ERR(lpm_tree);
2062 fib->lpm_tree = lpm_tree;
2063 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2064 if (err)
2065 goto err_tree_bind;
2066 }
2067
2068 mlxsw_sp_fib_node_prefix_inc(fib_node);
2069
2070 return 0;
2071
2072err_tree_bind:
2073 fib->lpm_tree = NULL;
2074 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2075err_tree_check:
2076 fib_node->fib = NULL;
2077 mlxsw_sp_fib_node_remove(fib, fib_node);
2078 return err;
2079}
2080
/* Undo mlxsw_sp_fib_node_init(): drop the prefix length reference and,
 * if this was the last prefix in the FIB, unbind and release the LPM
 * tree; otherwise re-evaluate the tree against the remaining usage.
 * Finally remove the node from the hash table.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	/* Cache the tree pointer: fib->lpm_tree is cleared below before
	 * the tree's reference is dropped.
	 */
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
2100
/* Get the FIB node for the prefix described by @fen_info, creating it
 * when it does not exist yet. Balanced by mlxsw_sp_fib4_node_put().
 * Returns the node or an ERR_PTR() on failure.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	/* Resolve (or create) the virtual router for the route's table
	 * and take its IPv4 FIB.
	 */
	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2141
Ido Schimmel9aecce12017-02-09 10:28:42 +01002142static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
2143 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02002144{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002145 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02002146
Ido Schimmel9aecce12017-02-09 10:28:42 +01002147 if (!list_empty(&fib_node->entry_list))
2148 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002149 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002150 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002151 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02002152}
2153
/* Find the first entry in the node's list that a new entry with
 * @params should be inserted before: same table ID and either a lower
 * TOS or a greater-or-equal priority. The list is ordered by
 * decreasing table ID. Returns NULL when no such entry exists and the
 * caller must derive the position from table ID order alone.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		if (fib_entry->params.tos > params->tos)
			continue;
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}
2174
/* Append @new_entry after the run of entries sharing @fib_entry's
 * table ID, TOS and priority. @fib_entry must be the first entry of
 * that run and must not be NULL.
 */
static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	/* Advance to the first entry that differs in any of the keys;
	 * if none differs, the iterator stops with &fib_entry->list
	 * equal to the list head, so the add below appends at the end.
	 */
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	list_add_tail(&new_entry->list, &fib_entry->list);
	return 0;
}
2194
/* Insert @new_entry into the node's ordered entry list. With @append
 * the entry is placed after its identical siblings; with @replace it
 * is placed immediately before the entry it replaces (which the caller
 * removes afterwards).
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		/* No insertion point found: place the entry after the
		 * last entry with a higher table ID.
		 */
		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}
2231
/* Remove the entry from its node's ordered entry list. */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
2237
/* Write the entry to the device, but only if it became the first (and
 * therefore the offloaded) entry of its node.
 */
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		/* The demoted entry is overwritten, not deleted from
		 * the device; only its offload indication is cleared.
		 */
		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
2258
/* Remove the entry's reflection from the device. If it was the first
 * entry of its node, promote the next one by overwriting it in place;
 * if it was the only entry, delete the route from the device.
 */
static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
2279
/* Link the entry into its node's list and, when it becomes the
 * preferred entry, write it to the device.
 */
static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
					     append);
	if (err)
		return err;

	err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
	if (err)
		goto err_fib4_node_entry_add;

	return 0;

err_fib4_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib_entry);
	return err;
}
2302
/* Reverse of mlxsw_sp_fib4_node_entry_link(): remove the entry from
 * the device (if reflected) and unlink it from the node's list.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}
2312
/* Complete a route replace: destroy the entry that was superseded.
 * The new entry was inserted immediately before it by the link step.
 * No-op unless @replace is set.
 */
static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
2330
/* Handle the addition of an IPv4 route: resolve (or create) the FIB
 * node for the prefix, create the entry, link it into the node and
 * dispose of a previously installed entry when @replace is set.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	/* After an abort, routing is handled entirely by the kernel. */
	if (mlxsw_sp->router.aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}
2373
/* Handle the deletion of an IPv4 route: find the matching entry,
 * unlink it from its node, destroy it and release the node.
 */
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	/* After an abort, no routes are reflected in the device. */
	if (mlxsw_sp->router.aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002392
/* Install the "abort" configuration used when route offload fails:
 * set up the minimal LPM tree, bind it to every in-use virtual router
 * and program a default (/0) route that traps all packets to the CPU,
 * so the kernel takes over forwarding.
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	/* Allocate the minimal LPM tree... */
	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	/* ...and set up its structure. */
	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		/* Bind the minimal tree to the virtual router. */
		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* Program a /0 catch-all route trapping to the CPU. */
		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
2438
Ido Schimmel9aecce12017-02-09 10:28:42 +01002439static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2440 struct mlxsw_sp_fib_node *fib_node)
2441{
2442 struct mlxsw_sp_fib_entry *fib_entry, *tmp;
2443
2444 list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
2445 bool do_break = &tmp->list == &fib_node->entry_list;
2446
2447 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2448 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2449 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2450 /* Break when entry list is empty and node was freed.
2451 * Otherwise, we'll access freed memory in the next
2452 * iteration.
2453 */
2454 if (do_break)
2455 break;
2456 }
2457}
2458
2459static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2460 struct mlxsw_sp_fib_node *fib_node)
2461{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002462 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002463 case MLXSW_SP_L3_PROTO_IPV4:
2464 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2465 break;
2466 case MLXSW_SP_L3_PROTO_IPV6:
2467 WARN_ON_ONCE(1);
2468 break;
2469 }
2470}
2471
Ido Schimmel76610eb2017-03-10 08:53:41 +01002472static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2473 struct mlxsw_sp_vr *vr,
2474 enum mlxsw_sp_l3proto proto)
2475{
2476 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
2477 struct mlxsw_sp_fib_node *fib_node, *tmp;
2478
2479 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2480 bool do_break = &tmp->list == &fib->node_list;
2481
2482 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2483 if (do_break)
2484 break;
2485 }
2486}
2487
Ido Schimmelac571de2016-11-14 11:26:32 +01002488static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002489{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002490 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002491
Jiri Pirkoc1a38312016-10-21 16:07:23 +02002492 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002493 struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01002494
Ido Schimmel76610eb2017-03-10 08:53:41 +01002495 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002496 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002497 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002498 }
Ido Schimmelac571de2016-11-14 11:26:32 +01002499}
2500
/* Enter aborted mode: stop offloading FIB entries, flush what is
 * already in hardware and install the CPU trap so routing keeps
 * working in software. Idempotent - subsequent calls are no-ops.
 */
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	/* Flush before setting the flag so the flush path still sees a
	 * consistent, non-aborted state.
	 */
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
2514
Ido Schimmel30572242016-12-03 16:45:01 +01002515struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01002516 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01002517 union {
2518 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002519 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01002520 struct fib_nh_notifier_info fnh_info;
2521 };
Ido Schimmel30572242016-12-03 16:45:01 +01002522 struct mlxsw_sp *mlxsw_sp;
2523 unsigned long event;
2524};
2525
/* Process a deferred FIB notification in process context. Releases the
 * references taken by the notifier (fib_info / fib_rule) and frees the
 * work item. Any offload failure switches the router to aborted mode.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		/* Non-default policy rules cannot be offloaded; abort so
		 * routing decisions are made by the kernel instead.
		 */
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule))
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
2570
/* Called with rcu_read_lock() */
/* FIB notifier callback. Runs in atomic context, so it only copies the
 * event payload, pins the referenced objects and schedules the actual
 * processing to a workqueue. Only init_net is supported.
 */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	/* GFP_ATOMIC: notifier chain may be invoked in atomic context. */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	fib_work->mlxsw_sp = mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
2617
Ido Schimmel4724ba562017-03-10 08:53:39 +01002618static struct mlxsw_sp_rif *
2619mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2620 const struct net_device *dev)
2621{
2622 int i;
2623
2624 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
2625 if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
2626 return mlxsw_sp->rifs[i];
2627
2628 return NULL;
2629}
2630
/* Disable a RIF in hardware via a read-modify-write of the RITR
 * register, leaving the rest of its configuration intact.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Read current RIF configuration. */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2644
/* Synchronize driver state with the removal of a RIF: disable it in
 * hardware first so no traffic uses it, then tear down the nexthops
 * and neighbour entries that referenced it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *r)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
}
2652
2653static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2654 const struct in_device *in_dev,
2655 unsigned long event)
2656{
2657 switch (event) {
2658 case NETDEV_UP:
2659 if (!r)
2660 return true;
2661 return false;
2662 case NETDEV_DOWN:
2663 if (r && !in_dev->ifa_list)
2664 return true;
2665 /* It is possible we already removed the RIF ourselves
2666 * if it was assigned to a netdev that is now a bridge
2667 * or LAG slave.
2668 */
2669 return false;
2670 }
2671
2672 return false;
2673}
2674
/* Sentinel returned when no free RIF index exists. */
#define MLXSW_SP_INVALID_RIF 0xffff
/* Return the first unused RIF index, or MLXSW_SP_INVALID_RIF if the
 * table is full.
 */
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_INVALID_RIF;
}
2686
2687static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2688 bool *p_lagged, u16 *p_system_port)
2689{
2690 u8 local_port = mlxsw_sp_vport->local_port;
2691
2692 *p_lagged = mlxsw_sp_vport->lagged;
2693 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2694}
2695
2696static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel69132292017-03-10 08:53:42 +01002697 u16 vr_id, struct net_device *l3_dev,
2698 u16 rif, bool create)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002699{
2700 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2701 bool lagged = mlxsw_sp_vport->lagged;
2702 char ritr_pl[MLXSW_REG_RITR_LEN];
2703 u16 system_port;
2704
Ido Schimmel69132292017-03-10 08:53:42 +01002705 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002706 l3_dev->mtu, l3_dev->dev_addr);
2707
2708 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2709 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2710 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2711
2712 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2713}
2714
2715static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2716
/* Map a Sub-port RIF index to its router FID (rFIDs start at
 * MLXSW_SP_RFID_BASE and are allocated one per RIF).
 */
static u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
	return MLXSW_SP_RFID_BASE + rif;
}
2721
2722static struct mlxsw_sp_fid *
2723mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2724{
2725 struct mlxsw_sp_fid *f;
2726
2727 f = kzalloc(sizeof(*f), GFP_KERNEL);
2728 if (!f)
2729 return NULL;
2730
2731 f->leave = mlxsw_sp_vport_rif_sp_leave;
2732 f->ref_count = 0;
2733 f->dev = l3_dev;
2734 f->fid = fid;
2735
2736 return f;
2737}
2738
2739static struct mlxsw_sp_rif *
Ido Schimmel69132292017-03-10 08:53:42 +01002740mlxsw_sp_rif_alloc(u16 rif, u16 vr_id, struct net_device *l3_dev,
2741 struct mlxsw_sp_fid *f)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002742{
2743 struct mlxsw_sp_rif *r;
2744
2745 r = kzalloc(sizeof(*r), GFP_KERNEL);
2746 if (!r)
2747 return NULL;
2748
2749 INIT_LIST_HEAD(&r->nexthop_list);
2750 INIT_LIST_HEAD(&r->neigh_list);
2751 ether_addr_copy(r->addr, l3_dev->dev_addr);
2752 r->mtu = l3_dev->mtu;
Ido Schimmel69132292017-03-10 08:53:42 +01002753 r->vr_id = vr_id;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002754 r->dev = l3_dev;
2755 r->rif = rif;
2756 r->f = f;
2757
2758 return r;
2759}
2760
/* Create a Sub-port RIF for a vPort on top of @l3_dev: reserve a RIF
 * index, bind a virtual router (per the netdev's L3 table, falling
 * back to the main table), program the RIF and FDB in hardware and
 * allocate the software rFID/RIF objects. Returns the new RIF or an
 * ERR_PTR; errors unwind in reverse order of the setup steps.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return ERR_PTR(-ERANGE);

	/* No L3 master device means the main table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif,
				       true);
	if (err)
		goto err_vport_rif_sp_op;

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;
	vr->rif_count++;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);
err_vport_rif_sp_op:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2819
/* Destroy a Sub-port RIF, undoing mlxsw_sp_vport_rif_sp_create() in
 * reverse order: sync away dependent state, release the software
 * objects, then remove the FDB entry and hardware RIF, and finally
 * drop the virtual router reference.
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	/* Capture fields above before r/f are freed below. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	vr->rif_count--;
	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif, false);

	mlxsw_sp_vr_put(vr);
}
2846
/* Attach a vPort to the Sub-port RIF of @l3_dev, creating the RIF on
 * first use. The rFID's refcount tracks how many vPorts share it.
 */
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}
2867
/* Detach a vPort from its Sub-port RIF; the last leaver destroys the
 * RIF itself.
 */
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
2878
/* Handle an inetaddr UP/DOWN event for the vPort of @port_dev with
 * VLAN @vid, joining or leaving the Sub-port RIF of @l3_dev.
 */
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}
2900
2901static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2902 unsigned long event)
2903{
2904 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2905 return 0;
2906
2907 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2908}
2909
/* Propagate an inetaddr event on @l3_dev to every mlxsw port member of
 * @lag_dev, stopping at the first failure.
 */
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		/* Lower devices may include non-mlxsw netdevs; skip them. */
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}
2929
/* Handle an inetaddr event on a LAG master. A bridged LAG is managed
 * through the bridge instead. VLAN 1 represents untagged traffic.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (!netif_is_bridge_port(lag_dev))
		return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
						     1);

	return 0;
}
2938
/* Resolve the FID object behind a bridge-related L3 device: the VLAN's
 * FID for a VLAN device, FID 1 for the VLAN-aware master bridge, or a
 * vFID lookup for a VLAN-unaware bridge.
 */
static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;	/* Default VLAN of the VLAN-aware bridge. */
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
2953
2954static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
2955{
2956 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
2957 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2958}
2959
2960static u16 mlxsw_sp_flood_table_index_get(u16 fid)
2961{
2962 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
2963}
2964
/* Add (@set == true) or remove the router port from the broadcast
 * flood table of @fid, so routed broadcast reaches / stops reaching
 * the router. Returns a register-write errno.
 */
static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
					  bool set)
{
	enum mlxsw_flood_table_type table_type;
	char *sftr_pl;
	u16 index;
	int err;

	/* SFTR payload is large; allocate instead of using the stack. */
	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	table_type = mlxsw_sp_flood_table_type_get(fid);
	index = mlxsw_sp_flood_table_index_get(fid);
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
			    1, MLXSW_PORT_ROUTER_PORT, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}
2986
2987static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2988{
2989 if (mlxsw_sp_fid_is_vfid(fid))
2990 return MLXSW_REG_RITR_FID_IF;
2991 else
2992 return MLXSW_REG_RITR_VLAN_IF;
2993}
2994
Ido Schimmel69132292017-03-10 08:53:42 +01002995static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002996 struct net_device *l3_dev,
2997 u16 fid, u16 rif,
2998 bool create)
2999{
3000 enum mlxsw_reg_ritr_if_type rif_type;
3001 char ritr_pl[MLXSW_REG_RITR_LEN];
3002
3003 rif_type = mlxsw_sp_rif_type_get(fid);
Ido Schimmel69132292017-03-10 08:53:42 +01003004 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003005 l3_dev->dev_addr);
3006 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3007
3008 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3009}
3010
/* Create a bridge RIF for @l3_dev backed by FID @f: reserve a RIF
 * index, bind a virtual router, enable router-port flooding, program
 * the RIF and FDB entry, then allocate the software RIF object.
 * Errors unwind in reverse order of the setup steps.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_INVALID_RIF)
		return -ERANGE;

	/* No L3 master device means the main table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		goto err_port_flood_set;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif,
				     true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, vr->id, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;
	vr->rif_count++;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
	mlxsw_sp_vr_put(vr);
	return err;
}
3066
/* Destroy a bridge RIF, undoing mlxsw_sp_rif_bridge_create() in
 * reverse order. Exported for use by the switchdev code.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[r->vr_id];
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	/* Capture fields above before r is freed below. */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);

	vr->rif_count--;
	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif, false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	mlxsw_sp_vr_put(vr);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
3093
/* Handle an inetaddr UP/DOWN event on a bridge (or a VLAN device on
 * top of one), creating or destroying the corresponding bridge RIF.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}
3119
/* Handle an inetaddr event on a VLAN device, dispatching on what its
 * real device is: mlxsw port, LAG master, or the master bridge.
 * Events on unrelated VLAN devices are ignored.
 */
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}
3140
3141int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3142 unsigned long event, void *ptr)
3143{
3144 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3145 struct net_device *dev = ifa->ifa_dev->dev;
3146 struct mlxsw_sp *mlxsw_sp;
3147 struct mlxsw_sp_rif *r;
3148 int err = 0;
3149
3150 mlxsw_sp = mlxsw_sp_lower_get(dev);
3151 if (!mlxsw_sp)
3152 goto out;
3153
3154 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3155 if (!mlxsw_sp_rif_should_config(r, ifa->ifa_dev, event))
3156 goto out;
3157
3158 if (mlxsw_sp_port_dev_check(dev))
3159 err = mlxsw_sp_inetaddr_port_event(dev, event);
3160 else if (netif_is_lag_master(dev))
3161 err = mlxsw_sp_inetaddr_lag_event(dev, event);
3162 else if (netif_is_bridge_master(dev))
3163 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3164 else if (is_vlan_dev(dev))
3165 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
3166
3167out:
3168 return notifier_from_errno(err);
3169}
3170
/* Update an existing RIF's MAC address and MTU via a read-modify-write
 * of the RITR register.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	/* Re-issue as a create so the new attributes take effect. */
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
3187
/* React to a MAC or MTU change on a netdev that has a RIF: swap the
 * FDB entry to the new MAC, edit the RIF and update the cached values.
 * On failure the previous FDB/RIF state is restored.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	/* Remove the FDB entry for the old MAC before editing the RIF. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	/* Roll back to the previous MAC/MTU kept in r. */
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
3227
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003228static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3229{
3230 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
3231
3232 /* Flush pending FIB notifications and then flush the device's
3233 * table before requesting another dump. The FIB notification
3234 * block is unregistered, so no need to take RTNL.
3235 */
3236 mlxsw_core_flush_owq();
3237 mlxsw_sp_router_fib_flush(mlxsw_sp);
3238}
3239
Ido Schimmel4724ba562017-03-10 08:53:39 +01003240static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3241{
3242 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3243 u64 max_rifs;
3244 int err;
3245
3246 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3247 return -EIO;
3248
3249 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
3250 mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
3251 GFP_KERNEL);
3252 if (!mlxsw_sp->rifs)
3253 return -ENOMEM;
3254
3255 mlxsw_reg_rgcr_pack(rgcr_pl, true);
3256 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3257 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3258 if (err)
3259 goto err_rgcr_fail;
3260
3261 return 0;
3262
3263err_rgcr_fail:
3264 kfree(mlxsw_sp->rifs);
3265 return err;
3266}
3267
/* Low-level router teardown: disable the router in hardware and free
 * the RIF table, warning if any RIF is still allocated.
 */
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	int i;

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);

	/* All RIFs should have been destroyed by now. */
	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);

	kfree(mlxsw_sp->rifs);
}
3281
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003282int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3283{
3284 int err;
3285
3286 INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003287 err = __mlxsw_sp_router_init(mlxsw_sp);
3288 if (err)
3289 return err;
3290
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003291 err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
3292 &mlxsw_sp_nexthop_ht_params);
3293 if (err)
3294 goto err_nexthop_ht_init;
3295
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003296 err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
3297 &mlxsw_sp_nexthop_group_ht_params);
3298 if (err)
3299 goto err_nexthop_group_ht_init;
3300
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003301 mlxsw_sp_lpm_init(mlxsw_sp);
3302 err = mlxsw_sp_vrs_init(mlxsw_sp);
3303 if (err)
3304 goto err_vrs_init;
3305
Ido Schimmel8c9583a2016-10-27 15:12:57 +02003306 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003307 if (err)
3308 goto err_neigh_init;
3309
3310 mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003311 err = register_fib_notifier(&mlxsw_sp->fib_nb,
3312 mlxsw_sp_router_fib_dump_flush);
3313 if (err)
3314 goto err_register_fib_notifier;
3315
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003316 return 0;
3317
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003318err_register_fib_notifier:
3319 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003320err_neigh_init:
3321 mlxsw_sp_vrs_fini(mlxsw_sp);
3322err_vrs_init:
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003323 rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
3324err_nexthop_group_ht_init:
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003325 rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
3326err_nexthop_ht_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003327 __mlxsw_sp_router_fini(mlxsw_sp);
3328 return err;
3329}
3330
/* Tear down the router subsystem in the reverse order of
 * mlxsw_sp_router_init().
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
}