blob: 33cec1cc164259ad9d7dd022a811e957447a61ff [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
3 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02007 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020039#include <linux/rhashtable.h>
40#include <linux/bitops.h>
41#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020042#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010043#include <linux/inetdevice.h>
Ido Schimmel9db032b2017-03-16 09:08:17 +010044#include <linux/netdevice.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020045#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020046#include <net/neighbour.h>
47#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020048#include <net/ip_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010049#include <net/fib_rules.h>
Ido Schimmel57837882017-03-16 09:08:16 +010050#include <net/l3mdev.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020051
52#include "spectrum.h"
53#include "core.h"
54#include "reg.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020055#include "spectrum_cnt.h"
56#include "spectrum_dpipe.h"
57#include "spectrum_router.h"
Ido Schimmel464dce12016-07-02 11:00:15 +020058
/* Router interface (RIF): driver-side state for an L3 interface backed
 * by a kernel netdev, identified in the device by rif_index.
 */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops egressing via this RIF */
	struct list_head neigh_list;	/* neighbour entries on this RIF */
	struct net_device *dev;		/* backing kernel netdev */
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;			/* index of this RIF in the device */
	u16 vr_id;			/* virtual router this RIF belongs to */
	unsigned int counter_ingress;	/* counter index; meaningful only
					 * while counter_ingress_valid is set
					 */
	bool counter_ingress_valid;
	unsigned int counter_egress;	/* counter index; meaningful only
					 * while counter_egress_valid is set
					 */
	bool counter_egress_valid;
};
73
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020074static unsigned int *
75mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
76 enum mlxsw_sp_rif_counter_dir dir)
77{
78 switch (dir) {
79 case MLXSW_SP_RIF_COUNTER_EGRESS:
80 return &rif->counter_egress;
81 case MLXSW_SP_RIF_COUNTER_INGRESS:
82 return &rif->counter_ingress;
83 }
84 return NULL;
85}
86
87static bool
88mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
89 enum mlxsw_sp_rif_counter_dir dir)
90{
91 switch (dir) {
92 case MLXSW_SP_RIF_COUNTER_EGRESS:
93 return rif->counter_egress_valid;
94 case MLXSW_SP_RIF_COUNTER_INGRESS:
95 return rif->counter_ingress_valid;
96 }
97 return false;
98}
99
100static void
101mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
102 enum mlxsw_sp_rif_counter_dir dir,
103 bool valid)
104{
105 switch (dir) {
106 case MLXSW_SP_RIF_COUNTER_EGRESS:
107 rif->counter_egress_valid = valid;
108 break;
109 case MLXSW_SP_RIF_COUNTER_INGRESS:
110 rif->counter_ingress_valid = valid;
111 break;
112 }
113}
114
/* Bind or unbind counter @counter_index to the RIF in direction @dir by
 * a read-modify-write of the RITR register, so only the counter fields
 * of the RIF configuration are changed.
 */
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	/* Fetch the RIF's current RITR contents before editing them. */
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
134
/* Read the current value of the RIF counter for direction @dir into
 * @cnt (good unicast packets field of the RICNT register).
 * Returns -EINVAL when no counter is allocated for that direction.
 */
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	/* NOP opcode: query the counter without clearing it. */
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
159
/* Zero the hardware counter @counter_index via a RICNT write with the
 * CLEAR opcode.
 */
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
169
/* Allocate a counter for @rif in direction @dir: reserve an index from
 * the RIF counter sub-pool, clear it, bind it to the RIF and mark it
 * valid. On any failure the counter index is returned to the pool.
 */
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	/* Start from zero rather than whatever the previous user left. */
	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
202
/* Release the counter for @rif in direction @dir: unbind it from the
 * RIF, return the index to the pool and mark the slot invalid.
 */
void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
218
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

/* Iterate over every prefix length marked as used in @prefix_usage. */
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
225
226static bool
Jiri Pirko6b75c482016-07-04 08:23:09 +0200227mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
228 struct mlxsw_sp_prefix_usage *prefix_usage2)
229{
230 unsigned char prefix;
231
232 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
233 if (!test_bit(prefix, prefix_usage2->b))
234 return false;
235 }
236 return true;
237}
238
239static bool
Jiri Pirko53342022016-07-04 08:23:08 +0200240mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
241 struct mlxsw_sp_prefix_usage *prefix_usage2)
242{
243 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
244}
245
Jiri Pirko6b75c482016-07-04 08:23:09 +0200246static bool
247mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
248{
249 struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
250
251 return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
252}
253
254static void
255mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
256 struct mlxsw_sp_prefix_usage *prefix_usage2)
257{
258 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
259}
260
/* Mark @prefix_len as used in the prefix-usage bitmap. */
static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}
267
/* Mark @prefix_len as unused in the prefix-usage bitmap. */
static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
274
/* Lookup key for a FIB node: address buffer sized for the larger of
 * IPv4/IPv6 plus the prefix length.
 */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

/* How a FIB entry is programmed in the device. */
enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;

/* A prefix within a FIB table; holds the list of entries that share
 * the same prefix (differing in their parameters).
 */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* entries for this prefix */
	struct list_head list;		/* member of fib->node_list */
	struct rhash_head ht_node;	/* member of fib->ht */
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

/* Route parameters used to distinguish entries under one node. */
struct mlxsw_sp_fib_entry_params {
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;		/* member of fib_node->entry_list */
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;			/* currently reflected in HW */
};

/* One FIB table: per virtual router and L3 protocol. */
struct mlxsw_sp_fib {
	struct rhashtable ht;		/* nodes keyed by mlxsw_sp_fib_key */
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200324
/* Allocate and initialize a FIB table for virtual router @vr and
 * protocol @proto. Returns the table or an ERR_PTR; the caller must
 * release it with mlxsw_sp_fib_destroy().
 */
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}
346
/* Free a FIB table. The table must already be empty and have no LPM
 * tree bound — the WARN_ONs flag a teardown-ordering bug otherwise.
 */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
354
Jiri Pirko53342022016-07-04 08:23:08 +0200355static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100356mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200357{
358 static struct mlxsw_sp_lpm_tree *lpm_tree;
359 int i;
360
Ido Schimmel8494ab02017-03-24 08:02:47 +0100361 for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
362 lpm_tree = &mlxsw_sp->router.lpm.trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100363 if (lpm_tree->ref_count == 0)
364 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200365 }
366 return NULL;
367}
368
/* Allocate LPM tree @lpm_tree->id in the device via the RALTA register. */
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
379
/* Free LPM tree @lpm_tree->id in the device via the RALTA register. */
static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
390
/* Program the tree structure via the RALST register: the largest used
 * prefix length becomes the root bin, and each remaining used prefix
 * length is chained as the left child of the previously written bin
 * (iteration order of the bitmap is ascending, so the chain runs from
 * shortest to longest prefix).
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	/* The last set bit, i.e. the longest used prefix, is the root. */
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
414
/* Take an unused tree slot, allocate it in the device and program its
 * structure according to @prefix_usage. On success the usage bitmap is
 * cached in the tree so later lookups can match against it. Returns the
 * tree or an ERR_PTR (-EBUSY when no free slot exists).
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
443
/* Destroy a tree whose reference count dropped to zero; currently just
 * frees it in the device.
 */
static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
449
450static struct mlxsw_sp_lpm_tree *
451mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
452 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100453 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200454{
455 struct mlxsw_sp_lpm_tree *lpm_tree;
456 int i;
457
Ido Schimmel8494ab02017-03-24 08:02:47 +0100458 for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
459 lpm_tree = &mlxsw_sp->router.lpm.trees[i];
Jiri Pirko8b99bec2016-10-25 11:25:57 +0200460 if (lpm_tree->ref_count != 0 &&
461 lpm_tree->proto == proto &&
Jiri Pirko53342022016-07-04 08:23:08 +0200462 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
463 prefix_usage))
464 goto inc_ref_count;
465 }
466 lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100467 proto);
Jiri Pirko53342022016-07-04 08:23:08 +0200468 if (IS_ERR(lpm_tree))
469 return lpm_tree;
470
471inc_ref_count:
472 lpm_tree->ref_count++;
473 return lpm_tree;
474}
475
476static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
477 struct mlxsw_sp_lpm_tree *lpm_tree)
478{
479 if (--lpm_tree->ref_count == 0)
480 return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
481 return 0;
482}
483
#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */

/* Size the LPM tree array from the device's MAX_LPM_TREES resource
 * (minus the reserved trees) and assign each slot its device tree ID.
 */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router.lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router.lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}
510
/* Release the LPM tree array allocated by mlxsw_sp_lpm_init(). */
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router.lpm.trees);
}
515
Ido Schimmel76610eb2017-03-10 08:53:41 +0100516static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
517{
518 return !!vr->fib4;
519}
520
Jiri Pirko6b75c482016-07-04 08:23:09 +0200521static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
522{
523 struct mlxsw_sp_vr *vr;
524 int i;
525
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200526 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200527 vr = &mlxsw_sp->router.vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100528 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200529 return vr;
530 }
531 return NULL;
532}
533
/* Bind the FIB's LPM tree to its virtual router via the RALTB register. */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
544
/* Unbind the virtual router from its LPM tree by rebinding to tree 0. */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
555
556static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
557{
558 /* For our purpose, squash main and local table into one */
559 if (tb_id == RT_TABLE_LOCAL)
560 tb_id = RT_TABLE_MAIN;
561 return tb_id;
562}
563
564static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100565 u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200566{
567 struct mlxsw_sp_vr *vr;
568 int i;
569
570 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200571
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200572 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Jiri Pirko6b75c482016-07-04 08:23:09 +0200573 vr = &mlxsw_sp->router.vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100574 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200575 return vr;
576 }
577 return NULL;
578}
579
/* Return the VR's FIB table for @proto. IPv6 is not supported in this
 * version of the code, hence the BUG_ON.
 */
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}
591
592static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
593 u32 tb_id)
594{
Jiri Pirko6b75c482016-07-04 08:23:09 +0200595 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200596
597 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
598 if (!vr)
599 return ERR_PTR(-EBUSY);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100600 vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
601 if (IS_ERR(vr->fib4))
602 return ERR_CAST(vr->fib4);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200603 vr->tb_id = tb_id;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200604 return vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200605}
606
/* Release the VR's FIB table; clearing fib4 marks the slot unused. */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
612
/* Make sure the LPM tree bound to @fib can serve @req_prefix_usage.
 * If the bound tree does not match exactly, try to get a matching tree
 * and rebind; if no tree is available, the current one is still kept
 * when the requirement is a subset of its prefixes. On rebind failure
 * the old tree is restored, so the binding never ends up dangling.
 */
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
652
Ido Schimmel76610eb2017-03-10 08:53:41 +0100653static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200654{
655 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200656
657 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100658 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
659 if (!vr)
660 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200661 return vr;
662}
663
/* Release a VR reference: destroy it once no RIFs use it and its IPv4
 * FIB has no nodes left.
 */
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}
669
/* Allocate the virtual router array according to the device's MAX_VRS
 * resource and assign each slot its VR ID.
 */
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router.vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
				       GFP_KERNEL);
	if (!mlxsw_sp->router.vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router.vrs[i];
		vr->id = i;
	}

	return 0;
}
692
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

/* Tear down the virtual routers: drain queued FIB work, flush the
 * device's routing tables and free the VR array.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router.vrs);
}
708
/* Hash key for neighbour entries: the kernel neighbour pointer itself. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* member of the RIF's neigh_list */
	struct rhash_head ht_node;	/* member of router.neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif;			/* RIF index the neighbour sits on */
	/* NOTE(review): appears to track whether the neighbour was last
	 * reported reachable/connected — confirm against users below.
	 */
	bool connected;
	unsigned char ha[ETH_ALEN];	/* cached hardware address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
731
Ido Schimmel5c8802f2017-02-06 16:20:13 +0100732static struct mlxsw_sp_neigh_entry *
733mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
734 u16 rif)
735{
736 struct mlxsw_sp_neigh_entry *neigh_entry;
737
738 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
739 if (!neigh_entry)
740 return NULL;
741
742 neigh_entry->key.n = n;
743 neigh_entry->rif = rif;
744 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
745
746 return neigh_entry;
747}
748
/* Free a neighbour entry previously allocated by
 * mlxsw_sp_neigh_entry_alloc().
 */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
753
/* Insert the entry into the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
762
/* Remove the entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
771
/* Create a neighbour entry for kernel neighbour @n: resolve its RIF
 * from n->dev, allocate the entry, index it in the hash table and hang
 * it on the RIF's neighbour list. Returns the entry or an ERR_PTR
 * (-EINVAL when the netdev has no RIF).
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
799
/* Undo mlxsw_sp_neigh_entry_create(): unlink from the RIF list, remove
 * from the hash table and free.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
808
/* Look up the driver neighbour entry keyed by kernel neighbour @n.
 * Returns NULL when no entry exists.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}
818
Yotam Gigic723c7352016-07-05 11:27:43 +0200819static void
820mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
821{
822 unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
823
824 mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
825}
826
/* Handle one IPv4 activity entry from a RAUHTD dump: find the matching
 * kernel neighbour and report activity on it so the kernel does not age
 * it out while traffic flows through the ASIC.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	/* The RIF reported by the device must be known to the driver. */
	if (!mlxsw_sp->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	/* Signal activity; drop the reference taken by neigh_lookup(). */
	neigh_event_send(n, NULL);
	neigh_release(n);
}
857
858static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
859 char *rauhtd_pl,
860 int rec_index)
861{
862 u8 num_entries;
863 int i;
864
865 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
866 rec_index);
867 /* Hardware starts counting at 0, so add 1. */
868 num_entries++;
869
870 /* Each record consists of several neighbour entries. */
871 for (i = 0; i < num_entries; i++) {
872 int ent_index;
873
874 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
875 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
876 ent_index);
877 }
878
879}
880
881static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
882 char *rauhtd_pl, int rec_index)
883{
884 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
885 case MLXSW_REG_RAUHTD_TYPE_IPV4:
886 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
887 rec_index);
888 break;
889 case MLXSW_REG_RAUHTD_TYPE_IPV6:
890 WARN_ON_ONCE(1);
891 break;
892 }
893}
894
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100895static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
896{
897 u8 num_rec, last_rec_index, num_entries;
898
899 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
900 last_rec_index = num_rec - 1;
901
902 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
903 return false;
904 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
905 MLXSW_REG_RAUHTD_TYPE_IPV6)
906 return true;
907
908 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
909 last_rec_index);
910 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
911 return true;
912 return false;
913}
914
Yotam Gigib2157142016-07-05 11:27:51 +0200915static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
Yotam Gigic723c7352016-07-05 11:27:43 +0200916{
Yotam Gigic723c7352016-07-05 11:27:43 +0200917 char *rauhtd_pl;
918 u8 num_rec;
919 int i, err;
920
921 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
922 if (!rauhtd_pl)
Yotam Gigib2157142016-07-05 11:27:51 +0200923 return -ENOMEM;
Yotam Gigic723c7352016-07-05 11:27:43 +0200924
925 /* Make sure the neighbour's netdev isn't removed in the
926 * process.
927 */
928 rtnl_lock();
929 do {
930 mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
931 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
932 rauhtd_pl);
933 if (err) {
934 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
935 break;
936 }
937 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
938 for (i = 0; i < num_rec; i++)
939 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
940 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +0100941 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +0200942 rtnl_unlock();
943
944 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +0200945 return err;
946}
947
/* Keep neighbours that serve as nexthops alive: routed traffic flows
 * through the ASIC without hitting the CPU, so without this the kernel
 * would age them out.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
962
963static void
964mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
965{
966 unsigned long interval = mlxsw_sp->router.neighs_update.interval;
967
968 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
969 msecs_to_jiffies(interval));
970}
971
972static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
973{
974 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
975 router.neighs_update.dw.work);
976 int err;
977
978 err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
979 if (err)
980 dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
981
982 mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
983
Yotam Gigic723c7352016-07-05 11:27:43 +0200984 mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
985}
986
Yotam Gigi0b2361d2016-07-05 11:27:52 +0200987static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
988{
989 struct mlxsw_sp_neigh_entry *neigh_entry;
990 struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
991 router.nexthop_probe_dw.work);
992
993 /* Iterate over nexthop neighbours, find those who are unresolved and
994 * send arp on them. This solves the chicken-egg problem when
995 * the nexthop wouldn't get offloaded until the neighbor is resolved
996 * but it wouldn't get resolved ever in case traffic is flowing in HW
997 * using different nexthop.
998 *
999 * Take RTNL mutex here to prevent lists from changes.
1000 */
1001 rtnl_lock();
1002 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001003 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001004 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001005 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001006 rtnl_unlock();
1007
1008 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
1009 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1010}
1011
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001012static void
1013mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1014 struct mlxsw_sp_neigh_entry *neigh_entry,
1015 bool removing);
1016
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001017static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001018{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001019 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1020 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1021}
1022
/* Program (or remove) an IPv4 neighbour entry in the device's RAUHT
 * table, using the entry's cached RIF and MAC and the kernel
 * neighbour's primary key as destination IP. The register write result
 * is not checked.
 */
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
1036
1037static void
1038mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1039 struct mlxsw_sp_neigh_entry *neigh_entry,
1040 bool adding)
1041{
1042 if (!adding && !neigh_entry->connected)
1043 return;
1044 neigh_entry->connected = adding;
1045 if (neigh_entry->key.n->tbl == &arp_tbl)
1046 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
1047 mlxsw_sp_rauht_op(adding));
1048 else
1049 WARN_ON_ONCE(1);
1050}
1051
/* Deferred-work context carrying a NETEVENT_NEIGH_UPDATE notification
 * from atomic notifier context to process context.
 */
struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;	/* reference held via neigh_clone() */
};
1057
/* Process-context handler for a neighbour update: snapshot the kernel
 * neighbour's state under its lock, then (under RTNL) create, update or
 * destroy the matching driver entry and refresh nexthops using it.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* Nothing to do for an unknown, disconnected neighbour. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* Disconnected entries not referenced by any nexthop are freed. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	/* Drop the reference taken when the event was queued. */
	neigh_release(n);
	kfree(neigh_work);
}
1102
Jiri Pirkoe7322632016-09-01 10:37:43 +02001103int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1104 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02001105{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001106 struct mlxsw_sp_neigh_event_work *neigh_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02001107 struct mlxsw_sp_port *mlxsw_sp_port;
1108 struct mlxsw_sp *mlxsw_sp;
1109 unsigned long interval;
1110 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001111 struct neighbour *n;
Yotam Gigic723c7352016-07-05 11:27:43 +02001112
1113 switch (event) {
1114 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
1115 p = ptr;
1116
1117 /* We don't care about changes in the default table. */
1118 if (!p->dev || p->tbl != &arp_tbl)
1119 return NOTIFY_DONE;
1120
1121 /* We are in atomic context and can't take RTNL mutex,
1122 * so use RCU variant to walk the device chain.
1123 */
1124 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
1125 if (!mlxsw_sp_port)
1126 return NOTIFY_DONE;
1127
1128 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1129 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
1130 mlxsw_sp->router.neighs_update.interval = interval;
1131
1132 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1133 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001134 case NETEVENT_NEIGH_UPDATE:
1135 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001136
1137 if (n->tbl != &arp_tbl)
1138 return NOTIFY_DONE;
1139
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001140 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001141 if (!mlxsw_sp_port)
1142 return NOTIFY_DONE;
1143
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001144 neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
1145 if (!neigh_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001146 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001147 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001148 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001149
1150 INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
1151 neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1152 neigh_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001153
1154 /* Take a reference to ensure the neighbour won't be
1155 * destructed until we drop the reference in delayed
1156 * work.
1157 */
1158 neigh_clone(n);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001159 mlxsw_core_schedule_work(&neigh_work->work);
1160 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001161 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02001162 }
1163
1164 return NOTIFY_DONE;
1165}
1166
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001167static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
1168{
Yotam Gigic723c7352016-07-05 11:27:43 +02001169 int err;
1170
1171 err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
1172 &mlxsw_sp_neigh_ht_params);
1173 if (err)
1174 return err;
1175
1176 /* Initialize the polling interval according to the default
1177 * table.
1178 */
1179 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
1180
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001181 /* Create the delayed works for the activity_update */
Yotam Gigic723c7352016-07-05 11:27:43 +02001182 INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
1183 mlxsw_sp_router_neighs_update_work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001184 INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
1185 mlxsw_sp_router_probe_unresolved_nexthops);
Yotam Gigic723c7352016-07-05 11:27:43 +02001186 mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001187 mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02001188 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001189}
1190
/* Tear down neighbour tracking; both works are cancelled synchronously
 * before the hash table is destroyed.
 */
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
1197
Ido Schimmel9665b742017-02-08 11:16:42 +01001198static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001199 const struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001200{
1201 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1202
1203 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001204 rif->rif_index, rif->addr);
Ido Schimmel9665b742017-02-08 11:16:42 +01001205 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1206}
1207
/* A RIF is going away: flush its neighbours from the device, then
 * destroy the corresponding driver entries. The flush result is
 * ignored since the entries are torn down regardless.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}
1218
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001219struct mlxsw_sp_nexthop_key {
1220 struct fib_nh *fib_nh;
1221};
1222
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001223struct mlxsw_sp_nexthop {
1224 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01001225 struct list_head rif_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001226 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1227 * this belongs to
1228 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001229 struct rhash_head ht_node;
1230 struct mlxsw_sp_nexthop_key key;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001231 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001232 u8 should_offload:1, /* set indicates this neigh is connected and
1233 * should be put to KVD linear area of this group.
1234 */
1235 offloaded:1, /* set in case the neigh is actually put into
1236 * KVD linear area of this group.
1237 */
1238 update:1; /* set indicates that MAC of this neigh should be
1239 * updated in HW
1240 */
1241 struct mlxsw_sp_neigh_entry *neigh_entry;
1242};
1243
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001244struct mlxsw_sp_nexthop_group_key {
1245 struct fib_info *fi;
1246};
1247
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001248struct mlxsw_sp_nexthop_group {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001249 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001250 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001251 struct mlxsw_sp_nexthop_group_key key;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001252 u8 adj_index_valid:1,
1253 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001254 u32 adj_index;
1255 u16 ecmp_size;
1256 u16 count;
1257 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001258#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001259};
1260
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001261static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1262 .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1263 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1264 .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1265};
1266
/* Insert a nexthop group into the group hash table. */
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}
1274
/* Remove a nexthop group from the group hash table. */
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}
1282
/* Look up a nexthop group by key; returns NULL when not found. */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}
1290
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001291static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1292 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1293 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1294 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
1295};
1296
/* Insert a nexthop into the nexthop hash table. */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
1303
/* Remove a nexthop from the nexthop hash table. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}
1310
Ido Schimmelad178c82017-02-08 11:16:40 +01001311static struct mlxsw_sp_nexthop *
1312mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1313 struct mlxsw_sp_nexthop_key key)
1314{
1315 return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
1316 mlxsw_sp_nexthop_ht_params);
1317}
1318
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001319static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001320 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001321 u32 adj_index, u16 ecmp_size,
1322 u32 new_adj_index,
1323 u16 new_ecmp_size)
1324{
1325 char raleu_pl[MLXSW_REG_RALEU_LEN];
1326
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001327 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001328 (enum mlxsw_reg_ralxx_protocol) fib->proto,
1329 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001330 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001331 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1332}
1333
/* For every FIB that has entries using this nexthop group, switch its
 * routes from the old adjacency block to the group's current one.
 * Returns the first error, if any.
 */
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		/* Skip only when equal to the previous entry's FIB;
		 * NOTE(review): assumes entries of the same FIB are
		 * adjacent in fib_list — confirm against list users.
		 */
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}
1356
/* Write one adjacency (RATR) entry at @adj_index with the resolved
 * neighbour's MAC, egressing through the neighbour's RIF.
 */
static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
1368
1369static int
1370mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
Ido Schimmela59b7e02017-01-23 11:11:42 +01001371 struct mlxsw_sp_nexthop_group *nh_grp,
1372 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001373{
1374 u32 adj_index = nh_grp->adj_index; /* base */
1375 struct mlxsw_sp_nexthop *nh;
1376 int i;
1377 int err;
1378
1379 for (i = 0; i < nh_grp->count; i++) {
1380 nh = &nh_grp->nexthops[i];
1381
1382 if (!nh->should_offload) {
1383 nh->offloaded = 0;
1384 continue;
1385 }
1386
Ido Schimmela59b7e02017-01-23 11:11:42 +01001387 if (nh->update || reallocate) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001388 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1389 adj_index, nh);
1390 if (err)
1391 return err;
1392 nh->update = 0;
1393 nh->offloaded = 1;
1394 }
1395 adj_index++;
1396 }
1397 return 0;
1398}
1399
1400static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1401 struct mlxsw_sp_fib_entry *fib_entry);
1402
1403static int
1404mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1405 struct mlxsw_sp_nexthop_group *nh_grp)
1406{
1407 struct mlxsw_sp_fib_entry *fib_entry;
1408 int err;
1409
1410 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1411 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1412 if (err)
1413 return err;
1414 }
1415 return 0;
1416}
1417
/* Re-evaluate a nexthop group after its membership or neighbour state
 * changed: allocate/refresh the adjacency block for all offloadable
 * nexthops and re-point fib entries at it. On any failure, fall back
 * to trapping the group's traffic to the kernel (set_trap).
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	/* Gateway-less groups use no adjacency entries; just refresh
	 * the fib entries.
	 */
	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	/* Count offloadable nexthops and detect membership changes. */
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	/* Commit the new block, remembering the old one for migration. */
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	/* Atomically migrate routes from the old block, then free it. */
	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	/* Failure path: invalidate the group, trap traffic to the CPU
	 * and release any adjacency block still held.
	 */
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
1520
1521static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1522 bool removing)
1523{
1524 if (!removing && !nh->should_offload)
1525 nh->should_offload = 1;
1526 else if (removing && nh->offloaded)
1527 nh->should_offload = 0;
1528 nh->update = 1;
1529}
1530
/* A neighbour changed state: update every nexthop that uses it and
 * refresh each affected nexthop group.
 */
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1544
Ido Schimmel9665b742017-02-08 11:16:42 +01001545static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001546 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001547{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001548 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001549 return;
1550
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001551 nh->rif = rif;
1552 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001553}
1554
1555static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1556{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001557 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001558 return;
1559
1560 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001561 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01001562}
1563
Ido Schimmela8c97012017-02-08 11:16:35 +01001564static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1565 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001566{
1567 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01001568 struct fib_nh *fib_nh = nh->key.fib_nh;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001569 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001570 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001571 int err;
1572
Ido Schimmelad178c82017-02-08 11:16:40 +01001573 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01001574 return 0;
1575
Jiri Pirko33b13412016-11-10 12:31:04 +01001576 /* Take a reference of neigh here ensuring that neigh would
1577 * not be detructed before the nexthop entry is finished.
1578 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01001579 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01001580 */
Ido Schimmela8c97012017-02-08 11:16:35 +01001581 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01001582 if (!n) {
Ido Schimmela8c97012017-02-08 11:16:35 +01001583 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1584 if (IS_ERR(n))
1585 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001586 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01001587 }
1588 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1589 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001590 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1591 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001592 err = -EINVAL;
1593 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001594 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001595 }
Yotam Gigib2157142016-07-05 11:27:51 +02001596
1597 /* If that is the first nexthop connected to that neigh, add to
1598 * nexthop_neighs_list
1599 */
1600 if (list_empty(&neigh_entry->nexthop_list))
1601 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
1602 &mlxsw_sp->router.nexthop_neighs_list);
1603
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001604 nh->neigh_entry = neigh_entry;
1605 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1606 read_lock_bh(&n->lock);
1607 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001608 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001609 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01001610 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001611
1612 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001613
1614err_neigh_entry_create:
1615 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001616 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001617}
1618
/* Detach a nexthop from its resolved neighbour entry and drop the
 * neighbour reference taken when the nexthop was initialized. No-op if
 * the nexthop never resolved a neighbour (nh->neigh_entry == NULL).
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	/* Mark the nexthop as unresolved before unlinking it. */
	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	/* Destroy the neigh entry only when it is both unused and not
	 * kept alive by a connected neighbour state.
	 */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	/* Pairs with the reference taken via neigh_lookup()/neigh_create()
	 * in the nexthop neigh init path.
	 */
	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001644
Ido Schimmela8c97012017-02-08 11:16:35 +01001645static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1646 struct mlxsw_sp_nexthop_group *nh_grp,
1647 struct mlxsw_sp_nexthop *nh,
1648 struct fib_nh *fib_nh)
1649{
1650 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001651 struct in_device *in_dev;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001652 struct mlxsw_sp_rif *rif;
Ido Schimmela8c97012017-02-08 11:16:35 +01001653 int err;
1654
1655 nh->nh_grp = nh_grp;
1656 nh->key.fib_nh = fib_nh;
1657 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1658 if (err)
1659 return err;
1660
Ido Schimmel97989ee2017-03-10 08:53:38 +01001661 if (!dev)
1662 return 0;
1663
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001664 in_dev = __in_dev_get_rtnl(dev);
1665 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1666 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1667 return 0;
1668
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001669 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1670 if (!rif)
Ido Schimmela8c97012017-02-08 11:16:35 +01001671 return 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001672 mlxsw_sp_nexthop_rif_init(nh, rif);
Ido Schimmela8c97012017-02-08 11:16:35 +01001673
1674 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1675 if (err)
1676 goto err_nexthop_neigh_init;
1677
1678 return 0;
1679
1680err_nexthop_neigh_init:
1681 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1682 return err;
1683}
1684
/* Teardown counterpart of mlxsw_sp_nexthop_init(): release the
 * neighbour, unlink from the RIF and remove the nexthop from the hash
 * table, in reverse order of initialization.
 */
static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
1692
/* Handle FIB nexthop notifications (FIB_EVENT_NH_ADD / FIB_EVENT_NH_DEL):
 * (un)bind the nexthop from its RIF and neighbour and refresh the
 * owning group's adjacency state in the device.
 */
static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	/* Router offload was aborted; hardware is no longer kept in sync. */
	if (mlxsw_sp->router.aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	/* Without a RIF for the egress device there is nothing to bind. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		/* NOTE(review): a failure here is ignored; the nexthop is
		 * then simply left unresolved (no error path in a notifier).
		 */
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
1725
/* A RIF is going away: unbind every nexthop that egresses through it and
 * refresh each affected group so the device stops using the stale
 * adjacencies.
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	/* _safe iteration: the rif fini unlinks nh from rif->nexthop_list
	 * while we walk it.
	 */
	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
1737
/* Allocate and initialize a nexthop group mirroring FIB info 'fi',
 * insert it into the group hash table and program its adjacency state.
 * Returns the group or ERR_PTR() on failure.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	/* The nexthops are a trailing variable-length array of the group. */
	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	/* RT_SCOPE_LINK on the first nexthop marks a gatewayed route,
	 * i.e. one that requires neighbour resolution.
	 */
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	/* Unwind only the nexthops that were successfully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
1779
/* Destroy a nexthop group: unhash it, tear down each nexthop and do a
 * final refresh with all nexthops gone, after which the adjacency index
 * must have been released (hence the WARN).
 */
static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
1796
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001797static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1798 struct mlxsw_sp_fib_entry *fib_entry,
1799 struct fib_info *fi)
1800{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001801 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001802 struct mlxsw_sp_nexthop_group *nh_grp;
1803
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001804 key.fi = fi;
1805 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001806 if (!nh_grp) {
1807 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1808 if (IS_ERR(nh_grp))
1809 return PTR_ERR(nh_grp);
1810 }
1811 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1812 fib_entry->nh_group = nh_grp;
1813 return 0;
1814}
1815
1816static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1817 struct mlxsw_sp_fib_entry *fib_entry)
1818{
1819 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1820
1821 list_del(&fib_entry->nexthop_group_node);
1822 if (!list_empty(&nh_grp->fib_list))
1823 return;
1824 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1825}
1826
/* Decide whether a FIB entry can currently be offloaded: TOS-specific
 * routes never are; remote entries need a valid adjacency index and
 * local entries need an egress RIF.
 */
static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	if (fib_entry->params.tos)
		return false;

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	default:
		return false;
	}
}
1844
/* Mark the entry offloaded and bump the offload counter on its fib_info
 * via fib_info_offload_inc(). Only IPv4 is supported here.
 */
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 offload is not implemented. */
		WARN_ON_ONCE(1);
	}
}
1857
/* Inverse of mlxsw_sp_fib_entry_offload_set(): drop the fib_info offload
 * counter and clear the offloaded flag. Only IPv4 is supported here.
 */
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_dec(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 offload is not implemented. */
		WARN_ON_ONCE(1);
	}

	fib_entry->offloaded = false;
}
1871
/* Sync the entry's offload bookkeeping after a RALUE operation: a DELETE
 * clears the offloaded state, a successful WRITE sets or clears it
 * depending on whether the entry is offloadable now.
 * NOTE(review): 'err' is only consulted for WRITE, not for DELETE.
 */
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		if (!fib_entry->offloaded)
			return;
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		/* Flip the state only on an actual transition. */
		if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
		    !fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
			 fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
1895
/* Program a remote (gatewayed) IPv4 route in the LPM table: point it at
 * the group's adjacency range, or trap to the CPU when the group is not
 * offloadable.
 */
static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1929
/* Program a directly-connected IPv4 route: forward via the group's
 * egress RIF, or trap to the CPU when no RIF is bound.
 */
static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1958
/* Program an IPv4 route whose packets must always go to the CPU
 * (ip2me action), e.g. local/broadcast routes.
 */
static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
1974
/* Dispatch the RALUE operation according to the entry type. */
static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
	}
	/* Unknown entry type. */
	return -EINVAL;
}
1989
/* Apply 'op' to the entry in the device and refresh its offload
 * bookkeeping. IPv6 is unsupported: it returns -EINVAL early and skips
 * the offload refresh.
 */
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = -EINVAL;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		return err;
	}
	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
	return err;
}
2006
2007static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2008 struct mlxsw_sp_fib_entry *fib_entry)
2009{
Jiri Pirko7146da32016-09-01 10:37:41 +02002010 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2011 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002012}
2013
2014static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2015 struct mlxsw_sp_fib_entry *fib_entry)
2016{
2017 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2018 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2019}
2020
/* Map a kernel route type to the device action used for it:
 * broadcast/local -> trap (ip2me); unreachable/blackhole/prohibit ->
 * local action without a RIF (lower-priority trap, see below);
 * unicast -> remote when gatewayed, local otherwise.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		/* RT_SCOPE_LINK scope indicates a gateway nexthop. */
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
2052
/* Allocate a FIB entry for the notified IPv4 route, classify it and bind
 * it to a nexthop group. The entry is not yet linked into the node's
 * entry list nor written to the device. Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_alloc;
	}

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop_group_get;

	/* Fields matched against future notifications in lookups. */
	fib_entry->params.prio = fen_info->fi->fib_priority;
	fib_entry->params.tb_id = fen_info->tb_id;
	fib_entry->params.type = fen_info->type;
	fib_entry->params.tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib_entry;

err_nexthop_group_get:
err_fib4_entry_type_set:
	kfree(fib_entry);
err_fib_entry_alloc:
	return ERR_PTR(err);
}
2090
/* Free an entry created by mlxsw_sp_fib4_entry_create(), dropping its
 * nexthop group reference first.
 */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}
2097
2098static struct mlxsw_sp_fib_node *
2099mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2100 const struct fib_entry_notifier_info *fen_info);
2101
2102static struct mlxsw_sp_fib_entry *
2103mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2104 const struct fib_entry_notifier_info *fen_info)
2105{
2106 struct mlxsw_sp_fib_entry *fib_entry;
2107 struct mlxsw_sp_fib_node *fib_node;
2108
2109 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2110 if (IS_ERR(fib_node))
2111 return NULL;
2112
2113 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2114 if (fib_entry->params.tb_id == fen_info->tb_id &&
2115 fib_entry->params.tos == fen_info->tos &&
2116 fib_entry->params.type == fen_info->type &&
2117 fib_entry->nh_group->key.fi == fen_info->fi) {
2118 return fib_entry;
2119 }
2120 }
2121
2122 return NULL;
2123}
2124
/* rhashtable layout for FIB nodes, keyed by the fixed-size
 * {address, prefix length} key embedded in struct mlxsw_sp_fib_node.
 */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
2131
/* Insert the node into the FIB's {addr, prefix_len} hash table. */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
2138
/* Remove the node from the FIB's hash table. */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
2145
2146static struct mlxsw_sp_fib_node *
2147mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2148 size_t addr_len, unsigned char prefix_len)
2149{
2150 struct mlxsw_sp_fib_key key;
2151
2152 memset(&key, 0, sizeof(key));
2153 memcpy(key.addr, addr, addr_len);
2154 key.prefix_len = prefix_len;
2155 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2156}
2157
/* Allocate a FIB node for {addr, prefix_len} and link it on the FIB's
 * node list. Hash insertion and LPM bookkeeping happen later in
 * mlxsw_sp_fib_node_init(). Returns NULL on allocation failure.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	INIT_LIST_HEAD(&fib_node->entry_list);
	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}
2175
/* Unlink the node from the FIB's node list and free it; its entry list
 * must already be empty.
 */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
2182
2183static bool
2184mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2185 const struct mlxsw_sp_fib_entry *fib_entry)
2186{
2187 return list_first_entry(&fib_node->entry_list,
2188 struct mlxsw_sp_fib_entry, list) == fib_entry;
2189}
2190
/* Account one more node with this prefix length; on the 0 -> 1
 * transition mark the length as used in the FIB's prefix usage map.
 */
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
}
2199
/* Drop one node with this prefix length; on the 1 -> 0 transition clear
 * the length from the FIB's prefix usage map.
 */
static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
2208
Ido Schimmel76610eb2017-03-10 08:53:41 +01002209static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2210 struct mlxsw_sp_fib_node *fib_node,
2211 struct mlxsw_sp_fib *fib)
2212{
2213 struct mlxsw_sp_prefix_usage req_prefix_usage;
2214 struct mlxsw_sp_lpm_tree *lpm_tree;
2215 int err;
2216
2217 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2218 if (err)
2219 return err;
2220 fib_node->fib = fib;
2221
2222 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2223 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2224
2225 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2226 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2227 &req_prefix_usage);
2228 if (err)
2229 goto err_tree_check;
2230 } else {
2231 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2232 fib->proto);
2233 if (IS_ERR(lpm_tree))
2234 return PTR_ERR(lpm_tree);
2235 fib->lpm_tree = lpm_tree;
2236 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2237 if (err)
2238 goto err_tree_bind;
2239 }
2240
2241 mlxsw_sp_fib_node_prefix_inc(fib_node);
2242
2243 return 0;
2244
2245err_tree_bind:
2246 fib->lpm_tree = NULL;
2247 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2248err_tree_check:
2249 fib_node->fib = NULL;
2250 mlxsw_sp_fib_node_remove(fib, fib_node);
2251 return err;
2252}
2253
/* Reverse of mlxsw_sp_fib_node_init(): drop the prefix accounting,
 * unbind and put the LPM tree when the last prefix goes away, and
 * remove the node from the hash table.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		/* Last prefix gone: release the tree entirely. */
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		/* Possibly shrink the tree to the remaining usage. */
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
2273
/* Find or create the FIB node for the notified prefix in the proper
 * virtual router's IPv4 FIB. A newly created node is fully initialized
 * (hashed, LPM tree accounted). Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	/* Undo the VR get; it may destroy an otherwise unused VR. */
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}
2314
/* Release a node obtained via mlxsw_sp_fib4_node_get(). The node is
 * only torn down once its entry list is empty; the VR is released
 * afterwards.
 */
static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}
2326
Ido Schimmel9aecce12017-02-09 10:28:42 +01002327static struct mlxsw_sp_fib_entry *
2328mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2329 const struct mlxsw_sp_fib_entry_params *params)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002330{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002331 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002332
2333 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2334 if (fib_entry->params.tb_id > params->tb_id)
2335 continue;
2336 if (fib_entry->params.tb_id != params->tb_id)
2337 break;
2338 if (fib_entry->params.tos > params->tos)
2339 continue;
2340 if (fib_entry->params.prio >= params->prio ||
2341 fib_entry->params.tos < params->tos)
2342 return fib_entry;
2343 }
2344
2345 return NULL;
2346}
2347
/* Append 'new_entry' after the run of entries that share the insertion
 * point's (tb_id, tos, prio). 'fib_entry' is the insertion point found
 * by mlxsw_sp_fib4_node_entry_find() and must not be NULL.
 */
static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	/* Advance past all entries with identical sort keys. */
	fib_node = fib_entry->fib_node;
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	list_add_tail(&new_entry->list, &fib_entry->list);
	return 0;
}
2367
Ido Schimmel9aecce12017-02-09 10:28:42 +01002368static int
2369mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002370 struct mlxsw_sp_fib_entry *new_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002371 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002372{
2373 struct mlxsw_sp_fib_entry *fib_entry;
2374
2375 fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
2376
Ido Schimmel4283bce2017-02-09 10:28:43 +01002377 if (append)
2378 return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002379 if (replace && WARN_ON(!fib_entry))
2380 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01002381
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002382 /* Insert new entry before replaced one, so that we can later
2383 * remove the second.
2384 */
Ido Schimmel9aecce12017-02-09 10:28:42 +01002385 if (fib_entry) {
2386 list_add_tail(&new_entry->list, &fib_entry->list);
2387 } else {
2388 struct mlxsw_sp_fib_entry *last;
2389
2390 list_for_each_entry(last, &fib_node->entry_list, list) {
2391 if (new_entry->params.tb_id > last->params.tb_id)
2392 break;
2393 fib_entry = last;
2394 }
2395
2396 if (fib_entry)
2397 list_add(&new_entry->list, &fib_entry->list);
2398 else
2399 list_add(&new_entry->list, &fib_node->entry_list);
2400 }
2401
2402 return 0;
2403}
2404
/* Unlink @fib_entry from its FIB node's entry list. */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
2410
/* Program @fib_entry to the device if it is now the first (i.e. best)
 * entry of its node; otherwise do nothing. Any previously offloaded
 * entry for the prefix is overwritten in place.
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		/* Clear the offload indication of the demoted entry */
		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
2431
/* Remove @fib_entry from the device if it was the offloaded (first)
 * entry of its node. If other entries remain, the next one is promoted
 * by overwriting the deleted entry; if it was the only entry, the
 * prefix is deleted from the device altogether.
 */
static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
2452
/* Link @fib_entry into its node's entry list and, when it becomes the
 * best entry, program it to the device. On device failure the entry is
 * unlinked again so the list reflects the hardware state.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
					     append);
	if (err)
		return err;

	err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
	if (err)
		goto err_fib4_node_entry_add;

	return 0;

err_fib4_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib_entry);
	return err;
}
2475
/* Reverse of mlxsw_sp_fib4_node_entry_link(): remove the entry from the
 * device (possibly promoting its successor) and unlink it from the
 * node's entry list.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}
2485
/* Finish a route replace: @fib_entry was inserted right before the
 * entry it replaces, so its list successor is the replaced entry.
 * Unlink and destroy it and drop the node reference it held.
 * No-op unless @replace is set.
 */
static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
2503
/* Handle an IPv4 route add / replace / append notification: get (or
 * create) the FIB node for the prefix, create the new entry, link it
 * into the node and, for replace, dispose of the replaced entry.
 * Returns 0 immediately when the router is in aborted state, since
 * nothing is offloaded then.
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router.aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	/* Only acts when @replace is set */
	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}
2546
/* Handle an IPv4 route delete notification: unlink and destroy the
 * matching entry and drop its node reference. Silently ignored when the
 * router is in aborted state (nothing is offloaded then).
 */
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router.aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	/* Remember the node before the entry is freed */
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002565
/* Program the post-abort configuration: allocate the minimal IPv4 LPM
 * tree, bind every in-use virtual router to it and install a default
 * (0/0) route whose action is ip2me, so all routed packets are trapped
 * to the CPU instead of being forwarded by the device.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	/* Allocate the minimal LPM tree for IPv4 */
	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	/* Set the tree's structure */
	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		/* Bind this VR to the minimal tree */
		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* Default route with an ip2me (trap to CPU) action */
		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
2611
/* Destroy every entry of an IPv4 FIB node. Each mlxsw_sp_fib4_node_put()
 * may drop the node's last reference and free it, hence the do_break
 * bookkeeping computed before the node can go away.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		/* True on the last iteration: tmp then aliases the head */
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}
2631
2632static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2633 struct mlxsw_sp_fib_node *fib_node)
2634{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002635 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002636 case MLXSW_SP_L3_PROTO_IPV4:
2637 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2638 break;
2639 case MLXSW_SP_L3_PROTO_IPV6:
2640 WARN_ON_ONCE(1);
2641 break;
2642 }
2643}
2644
/* Flush every FIB node of @proto in virtual router @vr. Flushing a node
 * may free it, so the last-iteration condition is computed first.
 */
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		/* True on the last iteration: tmp then aliases the head */
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
2660
/* Flush the IPv4 FIB of every in-use virtual router. */
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	}
}
2673
/* Enter FIB abort mode: flush all offloaded routes and install the
 * catch-all trap so routing falls back to the kernel. Idempotent — a
 * second call while already aborted is a no-op.
 */
static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router.aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router.aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
2687
/* Deferred-work context for a FIB notifier event. The notifier runs in
 * atomic context, so the event payload is copied into this structure
 * and processed later in process context by
 * mlxsw_sp_router_fib_event_work().
 */
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	/* Per-event payload; which member is valid depends on @event */
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;	/* FIB_EVENT_* value */
};
2698
/* Process-context handler for FIB events queued by
 * mlxsw_sp_router_fib_event(). Runs under RTNL and releases the
 * reference taken on the event payload when it was queued, then frees
 * the work item itself.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		/* On any failure, stop offloading entirely */
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		/* Non-default rules that are not l3mdev rules cannot be
		 * offloaded, so abort.
		 */
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
2743
/* FIB notifier callback. Called with rcu_read_lock(), i.e. in atomic
 * context, so the event payload is copied (with a reference held on any
 * refcounted object inside it) and processing is deferred to a work
 * item. Only the init network namespace is supported.
 */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	/* Atomic context: GFP_ATOMIC allocation, no sleeping */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	fib_work->mlxsw_sp = mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
2790
/* Return the RIF whose backing netdev is @dev, or NULL if none exists.
 * Linear scan over the RIF table.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
			return mlxsw_sp->rifs[i];

	return NULL;
}
2803
/* Disable router interface @rif in the device, via a read-modify-write
 * of its RITR register. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Read the current RITR record for this RIF */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	/* Clear the enable bit and write it back */
	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2817
/* Sync router state after @rif goes away: disable it in the device and
 * flush the nexthops and neighbour entries that used it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
2825
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002826static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002827 const struct in_device *in_dev,
2828 unsigned long event)
2829{
2830 switch (event) {
2831 case NETDEV_UP:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002832 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002833 return true;
2834 return false;
2835 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002836 if (rif && !in_dev->ifa_list &&
2837 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01002838 return true;
2839 /* It is possible we already removed the RIF ourselves
2840 * if it was assigned to a netdev that is now a bridge
2841 * or LAG slave.
2842 */
2843 return false;
2844 }
2845
2846 return false;
2847}
2848
/* Sentinel returned when the RIF table is full */
#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
/* Return the first free RIF index, or MLXSW_SP_INVALID_INDEX_RIF when
 * no index is available.
 */
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_INVALID_INDEX_RIF;
}
2860
2861static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2862 bool *p_lagged, u16 *p_system_port)
2863{
2864 u8 local_port = mlxsw_sp_vport->local_port;
2865
2866 *p_lagged = mlxsw_sp_vport->lagged;
2867 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2868}
2869
/* Create (@create == true) or destroy the Sub-port RIF @rif_index for
 * @mlxsw_sp_vport on top of @l3_dev by writing the RITR register.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    u16 vr_id, struct net_device *l3_dev,
				    u16 rif_index, bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	/* Generic RIF attributes: MTU and MAC come from the L3 netdev */
	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
			    vr_id, l3_dev->mtu, l3_dev->dev_addr);

	/* Sub-port specific attributes: underlying port/LAG and VID */
	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
2888
2889static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2890
/* Map a Sub-port RIF index to its router FID (rFID) number. */
static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
{
	return MLXSW_SP_RFID_BASE + rif_index;
}
2895
2896static struct mlxsw_sp_fid *
2897mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2898{
2899 struct mlxsw_sp_fid *f;
2900
2901 f = kzalloc(sizeof(*f), GFP_KERNEL);
2902 if (!f)
2903 return NULL;
2904
2905 f->leave = mlxsw_sp_vport_rif_sp_leave;
2906 f->ref_count = 0;
2907 f->dev = l3_dev;
2908 f->fid = fid;
2909
2910 return f;
2911}
2912
2913static struct mlxsw_sp_rif *
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002914mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
Ido Schimmel69132292017-03-10 08:53:42 +01002915 struct mlxsw_sp_fid *f)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002916{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002917 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002918
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002919 rif = kzalloc(sizeof(*rif), GFP_KERNEL);
2920 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002921 return NULL;
2922
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002923 INIT_LIST_HEAD(&rif->nexthop_list);
2924 INIT_LIST_HEAD(&rif->neigh_list);
2925 ether_addr_copy(rif->addr, l3_dev->dev_addr);
2926 rif->mtu = l3_dev->mtu;
2927 rif->vr_id = vr_id;
2928 rif->dev = l3_dev;
2929 rif->rif_index = rif_index;
2930 rif->f = f;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002931
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002932 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002933}
2934
/* Return the hardware index of @rif. */
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}
2939
/* Return the ifindex of the netdev backing @rif. */
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
2944
Ido Schimmel4724ba562017-03-10 08:53:39 +01002945static struct mlxsw_sp_rif *
2946mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
2947 struct net_device *l3_dev)
2948{
2949 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Ido Schimmel57837882017-03-16 09:08:16 +01002950 u32 tb_id = l3mdev_fib_table(l3_dev);
Ido Schimmel69132292017-03-10 08:53:42 +01002951 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002952 struct mlxsw_sp_fid *f;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002953 struct mlxsw_sp_rif *rif;
2954 u16 fid, rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002955 int err;
2956
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002957 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
2958 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002959 return ERR_PTR(-ERANGE);
2960
Ido Schimmel57837882017-03-16 09:08:16 +01002961 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
Ido Schimmel69132292017-03-10 08:53:42 +01002962 if (IS_ERR(vr))
2963 return ERR_CAST(vr);
2964
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002965 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
2966 rif_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002967 if (err)
Ido Schimmel69132292017-03-10 08:53:42 +01002968 goto err_vport_rif_sp_op;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002969
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002970 fid = mlxsw_sp_rif_sp_to_fid(rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002971 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
2972 if (err)
2973 goto err_rif_fdb_op;
2974
2975 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
2976 if (!f) {
2977 err = -ENOMEM;
2978 goto err_rfid_alloc;
2979 }
2980
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002981 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
2982 if (!rif) {
Ido Schimmel4724ba562017-03-10 08:53:39 +01002983 err = -ENOMEM;
2984 goto err_rif_alloc;
2985 }
2986
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02002987 if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
2988 MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
2989 err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
2990 MLXSW_SP_RIF_COUNTER_EGRESS);
2991 if (err)
2992 netdev_dbg(mlxsw_sp_vport->dev,
2993 "Counter alloc Failed err=%d\n", err);
2994 }
2995
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002996 f->rif = rif;
2997 mlxsw_sp->rifs[rif_index] = rif;
Ido Schimmel69132292017-03-10 08:53:42 +01002998 vr->rif_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002999
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003000 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003001
3002err_rif_alloc:
3003 kfree(f);
3004err_rfid_alloc:
3005 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3006err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003007 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
3008 false);
Ido Schimmel69132292017-03-10 08:53:42 +01003009err_vport_rif_sp_op:
3010 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003011 return ERR_PTR(err);
3012}
3013
/* Tear down a Sub-port RIF, reversing mlxsw_sp_vport_rif_sp_create():
 * sync router state, free the counters, unlink and free the RIF and
 * rFID objects, remove the FDB entry, destroy the RIF in the device and
 * release the virtual router reference.
 */
static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
	/* Cache fields needed after the RIF and FID are freed */
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;
	u16 fid = f->fid;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);

	vr->rif_count--;
	mlxsw_sp->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
				 false);
	mlxsw_sp_vr_put(vr);
}
3043
/* Join @mlxsw_sp_vport to the RIF of @l3_dev, creating the RIF on first
 * use, then bind the vPort to the RIF's rFID and take a reference on it.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
	rif->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);

	return 0;
}
3064
/* Reverse of mlxsw_sp_vport_rif_sp_join(): unbind the vPort from its
 * rFID, drop the reference, and destroy the RIF when the last vPort
 * leaves.
 */
static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
}
3075
/* Handle a NETDEV_UP / NETDEV_DOWN inetaddr event on L3 device @l3_dev
 * for the vPort of @port_dev identified by VLAN @vid: join or leave the
 * corresponding Sub-port RIF.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}
3097
3098static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3099 unsigned long event)
3100{
Jiri Pirko2b94e582017-04-18 16:55:37 +02003101 if (netif_is_bridge_port(port_dev) ||
3102 netif_is_lag_port(port_dev) ||
3103 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003104 return 0;
3105
3106 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3107}
3108
/* Propagate an inetaddr event on @l3_dev to every mlxsw port member of
 * @lag_dev, using VLAN @vid for the per-port vPort lookup. Stops and
 * returns the first error encountered.
 */
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		/* Skip lower devices that are not mlxsw ports */
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}
3128
/* Handle an inetaddr event on a LAG device. A LAG enslaved to a bridge
 * is handled through the bridge, so it is ignored here; otherwise the
 * event is propagated to the LAG members with VLAN 1.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	return netif_is_bridge_port(lag_dev) ?
	       0 : __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}
3137
/* Resolve the FID object used for routing on bridge-related device
 * @l3_dev: the VLAN ID for a VLAN device, FID 1 for the master bridge
 * itself, and the matching vFID otherwise.
 */
static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
3152
/* Return the router port number: one past the last front-panel port. */
static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
3157
Ido Schimmel4724ba562017-03-10 08:53:39 +01003158static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
3159{
3160 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
3161 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3162}
3163
3164static u16 mlxsw_sp_flood_table_index_get(u16 fid)
3165{
3166 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
3167}
3168
3169static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
3170 bool set)
3171{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003172 u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003173 enum mlxsw_flood_table_type table_type;
3174 char *sftr_pl;
3175 u16 index;
3176 int err;
3177
3178 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
3179 if (!sftr_pl)
3180 return -ENOMEM;
3181
3182 table_type = mlxsw_sp_flood_table_type_get(fid);
3183 index = mlxsw_sp_flood_table_index_get(fid);
3184 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003185 1, router_port, set);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003186 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
3187
3188 kfree(sftr_pl);
3189 return err;
3190}
3191
3192static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3193{
3194 if (mlxsw_sp_fid_is_vfid(fid))
3195 return MLXSW_REG_RITR_FID_IF;
3196 else
3197 return MLXSW_REG_RITR_VLAN_IF;
3198}
3199
Ido Schimmel69132292017-03-10 08:53:42 +01003200static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003201 struct net_device *l3_dev,
3202 u16 fid, u16 rif,
3203 bool create)
3204{
3205 enum mlxsw_reg_ritr_if_type rif_type;
3206 char ritr_pl[MLXSW_REG_RITR_LEN];
3207
3208 rif_type = mlxsw_sp_rif_type_get(fid);
Ido Schimmel69132292017-03-10 08:53:42 +01003209 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003210 l3_dev->dev_addr);
3211 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3212
3213 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3214}
3215
/* Create a router interface (RIF) for a bridge (or bridge VLAN) netdev.
 *
 * Sequence: reserve a free RIF index, bind a virtual router matching the
 * netdev's FIB table, enable flooding towards the router port, program
 * the RIF and its FDB entry in hardware, and finally publish the RIF in
 * the driver's state. On failure, steps already taken are unwound in
 * reverse order via the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	u32 tb_id = l3mdev_fib_table(l3_dev);
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
		return -ERANGE;

	/* Fall back to the main table when the netdev is not under an
	 * L3 master device (l3mdev_fib_table() returned 0).
	 */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
	if (err)
		goto err_port_flood_set;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
				     rif_index, true);
	if (err)
		goto err_rif_bridge_op;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	/* Hardware is fully configured; publish the RIF. */
	f->rif = rif;
	mlxsw_sp->rifs[rif_index] = rif;
	vr->rif_count++;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);
err_rif_bridge_op:
	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
err_port_flood_set:
	mlxsw_sp_vr_put(vr);
	return err;
}
3272
/* Destroy a bridge RIF, reversing mlxsw_sp_rif_bridge_create().
 *
 * The netdev, FID and RIF index are snapshotted before the RIF struct
 * is freed, since they are still needed for the hardware teardown that
 * follows.
 */
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id];
	struct net_device *l3_dev = rif->dev;
	struct mlxsw_sp_fid *f = rif->f;
	u16 rif_index = rif->rif_index;

	/* Let the routing code drop its references to this RIF first
	 * (presumably flushes routes/nexthops using it — see
	 * mlxsw_sp_router_rif_gone_sync()).
	 */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);

	/* Unpublish the RIF from the driver's state, then free it. */
	vr->rif_count--;
	mlxsw_sp->rifs[rif_index] = NULL;
	f->rif = NULL;

	kfree(rif);

	/* Hardware teardown in reverse order of creation: FDB entry,
	 * RIF itself, flood membership, virtual router reference.
	 */
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
			       false);

	mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);

	mlxsw_sp_vr_put(vr);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
}
3300
3301static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3302 struct net_device *br_dev,
3303 unsigned long event)
3304{
3305 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3306 struct mlxsw_sp_fid *f;
3307
3308 /* FID can either be an actual FID if the L3 device is the
3309 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3310 * L3 device is a VLAN-unaware bridge and we get a vFID.
3311 */
3312 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3313 if (WARN_ON(!f))
3314 return -EINVAL;
3315
3316 switch (event) {
3317 case NETDEV_UP:
3318 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3319 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003320 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003321 break;
3322 }
3323
3324 return 0;
3325}
3326
3327static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3328 unsigned long event)
3329{
3330 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3331 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3332 u16 vid = vlan_dev_vlan_id(vlan_dev);
3333
3334 if (mlxsw_sp_port_dev_check(real_dev))
3335 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3336 vid);
3337 else if (netif_is_lag_master(real_dev))
3338 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3339 vid);
3340 else if (netif_is_bridge_master(real_dev) &&
3341 mlxsw_sp->master_bridge.dev == real_dev)
3342 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3343 event);
3344
3345 return 0;
3346}
3347
/* Dispatch an inetaddr event to the handler matching the netdev type.
 * Unrecognized netdev types are silently ignored.
 */
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event);

	if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);

	if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event);

	return 0;
}
3362
Ido Schimmel4724ba562017-03-10 08:53:39 +01003363int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3364 unsigned long event, void *ptr)
3365{
3366 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3367 struct net_device *dev = ifa->ifa_dev->dev;
3368 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003369 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003370 int err = 0;
3371
3372 mlxsw_sp = mlxsw_sp_lower_get(dev);
3373 if (!mlxsw_sp)
3374 goto out;
3375
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003376 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3377 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003378 goto out;
3379
Ido Schimmelb1e45522017-04-30 19:47:14 +03003380 err = __mlxsw_sp_inetaddr_event(dev, event);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003381out:
3382 return notifier_from_errno(err);
3383}
3384
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003385static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003386 const char *mac, int mtu)
3387{
3388 char ritr_pl[MLXSW_REG_RITR_LEN];
3389 int err;
3390
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003391 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003392 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3393 if (err)
3394 return err;
3395
3396 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3397 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3398 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3399 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3400}
3401
/* Handle a MAC address or MTU change on a netdev that has a RIF.
 *
 * The RIF's FDB entry for the old MAC is removed, the RIF is re-written
 * with the new MAC/MTU, and an FDB entry for the new MAC is installed.
 * On failure, the already-applied steps are rolled back so hardware
 * stays consistent with the cached (old) attributes.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	/* Nothing to do for netdevs without a RIF. */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	/* Commit the new attributes to the cached RIF state. */
	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	/* Restore the previous MAC/MTU in hardware. */
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	/* Re-install the FDB entry for the old MAC. */
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
	return err;
}
3442
Ido Schimmelb1e45522017-04-30 19:47:14 +03003443static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3444 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003445{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003446 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003447
Ido Schimmelb1e45522017-04-30 19:47:14 +03003448 /* If netdev is already associated with a RIF, then we need to
3449 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01003450 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03003451 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3452 if (rif)
3453 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003454
Ido Schimmelb1e45522017-04-30 19:47:14 +03003455 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003456}
3457
Ido Schimmelb1e45522017-04-30 19:47:14 +03003458static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3459 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003460{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003461 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003462
Ido Schimmelb1e45522017-04-30 19:47:14 +03003463 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3464 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003465 return;
Ido Schimmelb1e45522017-04-30 19:47:14 +03003466 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003467}
3468
Ido Schimmelb1e45522017-04-30 19:47:14 +03003469int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3470 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003471{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003472 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3473 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003474
Ido Schimmelb1e45522017-04-30 19:47:14 +03003475 if (!mlxsw_sp)
3476 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003477
Ido Schimmelb1e45522017-04-30 19:47:14 +03003478 switch (event) {
3479 case NETDEV_PRECHANGEUPPER:
3480 return 0;
3481 case NETDEV_CHANGEUPPER:
3482 if (info->linking)
3483 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3484 else
3485 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3486 break;
3487 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003488
Ido Schimmelb1e45522017-04-30 19:47:14 +03003489 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003490}
3491
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003492static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3493{
3494 struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb);
3495
3496 /* Flush pending FIB notifications and then flush the device's
3497 * table before requesting another dump. The FIB notification
3498 * block is unregistered, so no need to take RTNL.
3499 */
3500 mlxsw_core_flush_owq();
3501 mlxsw_sp_router_fib_flush(mlxsw_sp);
3502}
3503
/* Low-level router initialization: allocate the RIF pointer table and
 * enable the router in hardware via the RGCR register.
 *
 * Returns 0 on success, -EIO when the MAX_RIFS resource is unavailable,
 * or another negative errno.
 */
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	/* One slot per possible RIF, indexed by RIF number. */
	mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *),
				 GFP_KERNEL);
	if (!mlxsw_sp->rifs)
		return -ENOMEM;

	/* Enable the router and cap the number of router interfaces. */
	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		goto err_rgcr_fail;

	return 0;

err_rgcr_fail:
	kfree(mlxsw_sp->rifs);
	return err;
}
3531
3532static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3533{
3534 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3535 int i;
3536
3537 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3538 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3539
3540 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3541 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
3542
3543 kfree(mlxsw_sp->rifs);
3544}
3545
/* Initialize the router subsystem: nexthop hash tables, LPM trees,
 * virtual routers, the neighbour subsystem, and finally the FIB
 * notifier that feeds route updates into the driver. On failure, the
 * steps already taken are unwound in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		return err;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	/* mlxsw_sp_router_fib_dump_flush() is passed as the callback
	 * used to quiesce between FIB dump attempts.
	 */
	mlxsw_sp->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
err_nexthop_ht_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
	return err;
}
3599
/* Tear down the router subsystem, reversing mlxsw_sp_router_init()
 * step by step.
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
	__mlxsw_sp_router_fini(mlxsw_sp);
}